
fixes for several recent mount-related regressions

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQQqUNBr3gm4hGXdBJlZ7Krx/gZQ6wUCaKShewAKCRBZ7Krx/gZQ
 6+p0AQD0vMecDWcRALFSInfGofuozEZ+98i3G0stlzU8KrvvpQD/a2jEfxOZOBph
 0RL73MLt+eSl03tnsbqGRoHrcH9epQM=
 =3DGl
 -----END PGP SIGNATURE-----

Merge tag 'pull-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull mount fixes from Al Viro:
 "Fixes for several recent mount-related regressions"

* tag 'pull-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  change_mnt_propagation(): calculate propagation source only if we'll need it
  use uniform permission checks for all mount propagation changes
  propagate_umount(): only surviving overmounts should be reparented
  fix the softlockups in attach_recursive_mnt()
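
Of the fixes listed above, the permission-check change is the one that touches two call sites: the diff below moves both do_change_type() and do_set_group() onto a single helper. As a reading aid, here is a condensed sketch of that pattern, taken from the diff that follows; check_both_ends() is only an illustrative wrapper name, and the snippet is not a standalone build:

        /* A propagation change is allowed only if the mount is attached to
         * some namespace and the caller is CAP_SYS_ADMIN in that
         * namespace's user namespace. */
        static int may_change_propagation(const struct mount *m)
        {
                struct mnt_namespace *ns = m->mnt_ns;

                if (IS_ERR_OR_NULL(ns))         /* i.e. !is_mounted() */
                        return -EINVAL;
                if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
                        return -EPERM;
                return 0;
        }

        /* do_set_group() now applies the same check to both ends of the
         * operation; check_both_ends() is a made-up wrapper for illustration. */
        static int check_both_ends(const struct mount *from, const struct mount *to)
        {
                int err = may_change_propagation(from);

                if (!err)
                        err = may_change_propagation(to);
                return err;
        }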
Linus Torvalds 2025-08-19 10:12:10 -07:00
commit b19a97d57c
2 changed files with 28 additions and 23 deletions

fs/namespace.c

@@ -1197,10 +1197,7 @@ static void commit_tree(struct mount *mnt)
         if (!mnt_ns_attached(mnt)) {
                 for (struct mount *m = mnt; m; m = next_mnt(m, mnt))
-                        if (unlikely(mnt_ns_attached(m)))
-                                m = skip_mnt_tree(m);
-                        else
-                                mnt_add_to_ns(n, m);
+                        mnt_add_to_ns(n, m);
                 n->nr_mounts += n->pending_mounts;
                 n->pending_mounts = 0;
         }
@@ -2704,6 +2701,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                         lock_mnt_tree(child);
                 q = __lookup_mnt(&child->mnt_parent->mnt,
                                  child->mnt_mountpoint);
+                commit_tree(child);
                 if (q) {
                         struct mountpoint *mp = root.mp;
                         struct mount *r = child;
@@ -2713,7 +2711,6 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                                 mp = shorter;
                         mnt_change_mountpoint(r, mp, q);
                 }
-                commit_tree(child);
         }
         unpin_mountpoint(&root);
         unlock_mount_hash();
@@ -2862,6 +2859,19 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
         return attach_recursive_mnt(mnt, p, mp);
 }
 
+static int may_change_propagation(const struct mount *m)
+{
+        struct mnt_namespace *ns = m->mnt_ns;
+
+        // it must be mounted in some namespace
+        if (IS_ERR_OR_NULL(ns))         // is_mounted()
+                return -EINVAL;
+        // and the caller must be admin in userns of that namespace
+        if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+                return -EPERM;
+        return 0;
+}
+
 /*
  * Sanity check the flags to change_mnt_propagation.
  */
@@ -2898,10 +2908,10 @@ static int do_change_type(struct path *path, int ms_flags)
                 return -EINVAL;
 
         namespace_lock();
-        if (!check_mnt(mnt)) {
-                err = -EINVAL;
+        err = may_change_propagation(mnt);
+        if (err)
                 goto out_unlock;
-        }
 
         if (type == MS_SHARED) {
                 err = invent_group_ids(mnt, recurse);
                 if (err)
@@ -3347,18 +3357,11 @@ static int do_set_group(struct path *from_path, struct path *to_path)
 
         namespace_lock();
 
-        err = -EINVAL;
-        /* To and From must be mounted */
-        if (!is_mounted(&from->mnt))
-                goto out;
-        if (!is_mounted(&to->mnt))
+        err = may_change_propagation(from);
+        if (err)
                 goto out;
-
-        err = -EPERM;
-        /* We should be allowed to modify mount namespaces of both mounts */
-        if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
-                goto out;
-        if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
+        err = may_change_propagation(to);
+        if (err)
                 goto out;
 
         err = -EINVAL;

fs/pnode.c

@@ -111,7 +111,8 @@ void change_mnt_propagation(struct mount *mnt, int type)
                 return;
         }
         if (IS_MNT_SHARED(mnt)) {
-                m = propagation_source(mnt);
+                if (type == MS_SLAVE || !hlist_empty(&mnt->mnt_slave_list))
+                        m = propagation_source(mnt);
                 if (list_empty(&mnt->mnt_share)) {
                         mnt_release_group_id(mnt);
                 } else {
@@ -637,10 +638,11 @@ void propagate_umount(struct list_head *set)
         }
 
         // now to_umount consists of all acceptable candidates
-        // deal with reparenting of remaining overmounts on those
+        // deal with reparenting of surviving overmounts on those
         list_for_each_entry(m, &to_umount, mnt_list) {
-                if (m->overmount)
-                        reparent(m->overmount);
+                struct mount *over = m->overmount;
+                if (over && !will_be_unmounted(over))
+                        reparent(over);
         }
 
         // and fold them into the set