Skip to content

Commit 688d253

Browse files
committed
Create quota before _data dir for volumes
This resolves an ordering issue that prevented quotas from being applied. XFS quotas are applied recursively, but only for subdirectories created after the quota is applied; if we create `_data` before the quota, and then use `_data` for all data in the volume, the quota will never be used by the volume. Also, add a test that volume quotas are working as designed using an XFS formatted loop device in the system tests. This should prevent any further regressions on basic quota functionality, such as quotas being shared between volumes. Fixes containers#25368 Signed-off-by: Matt Heon <[email protected]>
1 parent e44ba88 commit 688d253

File tree

2 files changed

+54
-11
lines changed

2 files changed

+54
-11
lines changed

libpod/runtime_volume_common.go

+16-11
Original file line numberDiff line numberDiff line change
@@ -168,16 +168,11 @@ func (r *Runtime) newVolume(ctx context.Context, noCreatePluginVolume bool, opti
168168
if err := idtools.SafeChown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
169169
return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", volPathRoot, volume.config.UID, volume.config.GID, err)
170170
}
171-
fullVolPath := filepath.Join(volPathRoot, "_data")
172-
if err := os.MkdirAll(fullVolPath, 0755); err != nil {
173-
return nil, fmt.Errorf("creating volume directory %q: %w", fullVolPath, err)
174-
}
175-
if err := idtools.SafeChown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
176-
return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", fullVolPath, volume.config.UID, volume.config.GID, err)
177-
}
178-
if err := LabelVolumePath(fullVolPath, volume.config.MountLabel); err != nil {
179-
return nil, err
180-
}
171+
172+
// Setting quotas must happen *before* the _data inner directory
173+
// is created, as the volume must be empty for the quota to be
174+
// properly applied - if any subdirectories exist before the
175+
// quota is applied, the quota will not be applied to them.
181176
switch {
182177
case volume.config.DisableQuota:
183178
if volume.config.Size > 0 || volume.config.Inodes > 0 {
@@ -206,10 +201,20 @@ func (r *Runtime) newVolume(ctx context.Context, noCreatePluginVolume bool, opti
206201
// subdirectory - so the quota ID assignment logic works
207202
// properly.
208203
if err := q.SetQuota(volPathRoot, quota); err != nil {
209-
return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, fullVolPath, err)
204+
return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, volPathRoot, err)
210205
}
211206
}
212207

208+
fullVolPath := filepath.Join(volPathRoot, "_data")
209+
if err := os.MkdirAll(fullVolPath, 0755); err != nil {
210+
return nil, fmt.Errorf("creating volume directory %q: %w", fullVolPath, err)
211+
}
212+
if err := idtools.SafeChown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
213+
return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", fullVolPath, volume.config.UID, volume.config.GID, err)
214+
}
215+
if err := LabelVolumePath(fullVolPath, volume.config.MountLabel); err != nil {
216+
return nil, err
217+
}
213218
volume.config.MountPoint = fullVolPath
214219
}
215220

test/system/160-volumes.bats

+38
Original file line numberDiff line numberDiff line change
@@ -619,4 +619,42 @@ EOF
619619
run_podman volume rm $volume_name --force
620620
}
621621

622+
@test "podman volumes with XFS quotas" {
623+
skip_if_rootless "Quotas are only possible with root"
624+
625+
loop=$PODMAN_TMPDIR/loopdevice
626+
dd if=/dev/zero of=$loop bs=1M count=25
627+
loop_dev=$(losetup -f)
628+
losetup $loop_dev $loop
629+
mkfs.xfs $loop_dev
630+
631+
safe_opts=$(podman_isolation_opts ${PODMAN_TMPDIR})
632+
vol_path=$PODMAN_TMPDIR/volpath
633+
safe_opts="$safe_opts --volumepath=$vol_path"
634+
mkdir -p $vol_path
mount -t xfs -o defaults,pquota $loop_dev $vol_path
635+
636+
vol_one="testvol1"
637+
run_podman $safe_opts volume create --opt o=size=2m $vol_one
638+
639+
vol_two="testvol2"
640+
run_podman $safe_opts volume create --opt o=size=4m $vol_two
641+
642+
ctrname="testctr"
643+
run_podman $safe_opts run --name=$ctrname -t -i -v $vol_one:/one -v $vol_two:/two $IMAGE top
644+
645+
run_podman $safe_opts exec $ctrname dd if=/dev/zero of=/one/onemb bs=1M count=1
646+
run_podman 2 $safe_opts exec $ctrname dd if=/dev/zero of=/one/twomb bs=1M count=1
647+
assert "$output" =~ "No space left on device"
648+
run_podman $safe_opts exec $ctrname dd if=/dev/zero of=/two/threemb bs=1M count=3
649+
run_podman 2 $safe_opts exec $ctrname dd if=/dev/zero of=/two/onemb bs=1M count=1
650+
assert "$output" =~ "No space left on device"
651+
652+
run_podman rm -f -t 0 $ctrname
653+
run_podman $safe_opts volume rm -af
654+
655+
umount $vol_path
656+
losetup -d $loop_dev
657+
rm -f $loop
658+
}
659+
622660
# vim: filetype=sh

0 commit comments

Comments
 (0)