LCOV - code coverage report
Current view: top level - fs - pnode.c (source / functions)
Test:         landlock.info
Date:         2021-04-22 12:43:58

                    Hit      Total    Coverage
Lines:              225        272      82.7 %
Functions:           17         17     100.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  *  linux/fs/pnode.c
       4             :  *
       5             :  * (C) Copyright IBM Corporation 2005.
       6             :  *      Author : Ram Pai (linuxram@us.ibm.com)
       7             :  */
       8             : #include <linux/mnt_namespace.h>
       9             : #include <linux/mount.h>
      10             : #include <linux/fs.h>
      11             : #include <linux/nsproxy.h>
      12             : #include <uapi/linux/mount.h>
      13             : #include "internal.h"
      14             : #include "pnode.h"
      15             : 
      16             : /* return the next shared peer mount of @p */
      17        4931 : static inline struct mount *next_peer(struct mount *p)
      18             : {
      19        4931 :         return list_entry(p->mnt_share.next, struct mount, mnt_share);
      20             : }
      21             : 
      22         185 : static inline struct mount *first_slave(struct mount *p)
      23             : {
      24         185 :         return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
      25             : }
      26             : 
      27           0 : static inline struct mount *last_slave(struct mount *p)
      28             : {
      29           0 :         return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
      30             : }
      31             : 
      32        1113 : static inline struct mount *next_slave(struct mount *p)
      33             : {
      34        1113 :         return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
      35             : }
      36             : 
      37        2893 : static struct mount *get_peer_under_root(struct mount *mnt,
      38             :                                          struct mnt_namespace *ns,
      39             :                                          const struct path *root)
      40             : {
      41        2893 :         struct mount *m = mnt;
      42             : 
      43        2893 :         do {
      44             :                 /* Check the namespace first for optimization */
      45        2893 :                 if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
      46           0 :                         return m;
      47             : 
      48        2893 :                 m = next_peer(m);
      49        2893 :         } while (m != mnt);
      50             : 
      51             :         return NULL;
      52             : }
      53             : 
      54             : /*
      55             :  * Get ID of closest dominating peer group having a representative
      56             :  * under the given root.
      57             :  *
      58             :  * Caller must hold namespace_sem
      59             :  */
      60        2893 : int get_dominating_id(struct mount *mnt, const struct path *root)
      61             : {
      62        2893 :         struct mount *m;
      63             : 
      64        5786 :         for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
      65        2893 :                 struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
      66        2893 :                 if (d)
      67           0 :                         return d->mnt_group_id;
      68             :         }
      69             : 
      70             :         return 0;
      71             : }
      72             : 
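
The group ID computed by get_dominating_id() surfaces in /proc/[pid]/mountinfo: for a slave mount, show_mountinfo() prints "master:<id>" for the immediate master's peer group and, when the closest dominating group reachable under the caller's root differs from it, "propagate_from:<id>". A minimal userspace sketch for inspecting those optional fields (nothing below is part of pnode.c):

/* Dump /proc/self/mountinfo; the optional fields of each record are
 * "shared:<id>" (the mount's own peer group), "master:<id>" (its immediate
 * master's group) and "propagate_from:<id>" (the dominating group from
 * get_dominating_id(), shown only when it differs from the master). */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/self/mountinfo", "r");
        char line[4096];

        if (!f) {
                perror("mountinfo");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}
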
      73        1149 : static int do_make_slave(struct mount *mnt)
      74             : {
      75        1149 :         struct mount *master, *slave_mnt;
      76             : 
      77        1149 :         if (list_empty(&mnt->mnt_share)) {
      78         420 :                 if (IS_MNT_SHARED(mnt)) {
      79         161 :                         mnt_release_group_id(mnt);
      80         161 :                         CLEAR_MNT_SHARED(mnt);
      81             :                 }
      82         420 :                 master = mnt->mnt_master;
      83         420 :                 if (!master) {
      84         377 :                         struct list_head *p = &mnt->mnt_slave_list;
      85         561 :                         while (!list_empty(p)) {
      86         184 :                                 slave_mnt = list_first_entry(p,
      87             :                                                 struct mount, mnt_slave);
      88         184 :                                 list_del_init(&slave_mnt->mnt_slave);
      89         184 :                                 slave_mnt->mnt_master = NULL;
      90             :                         }
      91             :                         return 0;
      92             :                 }
      93             :         } else {
      94         729 :                 struct mount *m;
      95             :                 /*
      96             :                  * slave 'mnt' to a peer mount that has the
      97             :                  * same root dentry. If none is available then
      98             :                  * slave it to anything that is available.
      99             :                  */
     100         729 :                 for (m = master = next_peer(mnt); m != mnt; m = next_peer(m)) {
     101         729 :                         if (m->mnt.mnt_root == mnt->mnt.mnt_root) {
     102             :                                 master = m;
     103             :                                 break;
     104             :                         }
     105             :                 }
     106         729 :                 list_del_init(&mnt->mnt_share);
     107         729 :                 mnt->mnt_group_id = 0;
     108         729 :                 CLEAR_MNT_SHARED(mnt);
     109             :         }
     110         772 :         list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
     111           0 :                 slave_mnt->mnt_master = master;
     112         772 :         list_move(&mnt->mnt_slave, &master->mnt_slave_list);
     113         772 :         list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
     114         772 :         INIT_LIST_HEAD(&mnt->mnt_slave_list);
     115         772 :         mnt->mnt_master = master;
     116         772 :         return 0;
     117             : }
     118             : 
     119             : /*
     120             :  * vfsmount lock must be held for write
     121             :  */
     122        1225 : void change_mnt_propagation(struct mount *mnt, int type)
     123             : {
     124        1225 :         if (type == MS_SHARED) {
     125          76 :                 set_mnt_shared(mnt);
     126          76 :                 return;
     127             :         }
     128        1149 :         do_make_slave(mnt);
     129        1149 :         if (type != MS_SLAVE) {
     130        1110 :                 list_del_init(&mnt->mnt_slave);
     131        1110 :                 mnt->mnt_master = NULL;
     132        1110 :                 if (type == MS_UNBINDABLE)
     133           0 :                         mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
     134             :                 else
     135        1110 :                         mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
     136             :         }
     137             : }
     138             : 
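
The propagation types handled by change_mnt_propagation() are requested from userspace through mount(2): passing one of MS_SHARED, MS_SLAVE, MS_PRIVATE or MS_UNBINDABLE (optionally ORed with MS_REC) changes the type of an existing mount without touching its contents. A minimal sketch, assuming a mount already exists at /mnt and the caller has CAP_SYS_ADMIN; the path is purely illustrative:

/* Walk an existing mount through the propagation types. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* MS_SHARED: the kernel side ends in set_mnt_shared() above. */
        if (mount(NULL, "/mnt", NULL, MS_SHARED, NULL)) {
                perror("MS_SHARED");
                return 1;
        }
        /* MS_SLAVE: handled by do_make_slave(). */
        if (mount(NULL, "/mnt", NULL, MS_SLAVE, NULL)) {
                perror("MS_SLAVE");
                return 1;
        }
        /* MS_PRIVATE; MS_REC makes the caller of change_mnt_propagation()
         * apply the change to every mount in the subtree. */
        if (mount(NULL, "/mnt", NULL, MS_PRIVATE | MS_REC, NULL)) {
                perror("MS_PRIVATE");
                return 1;
        }
        return 0;
}

Re-reading /proc/self/mountinfo between the calls shows the shared:/master: fields appearing and disappearing on the /mnt record.
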
     139             : /*
     140             :  * get the next mount in the propagation tree.
     141             :  * @m: the mount seen last
     142             :  * @origin: the original mount from where the tree walk initiated
     143             :  *
     144             :  * Note that peer groups form contiguous segments of slave lists.
     145             :  * We rely on that in get_source() to be able to find out if
      146             :  * We rely on that in get_source() to be able to find out whether a
     147             :  * a peer of one we'd found earlier.
     148             :  */
     149        1421 : static struct mount *propagation_next(struct mount *m,
     150             :                                          struct mount *origin)
     151             : {
     152             :         /* are there any slaves of this mount? */
     153        1421 :         if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
     154         138 :                 return first_slave(m);
     155             : 
     156        1421 :         while (1) {
     157        1421 :                 struct mount *master = m->mnt_master;
     158             : 
     159        1421 :                 if (master == origin->mnt_master) {
     160         455 :                         struct mount *next = next_peer(m);
     161         455 :                         return (next == origin) ? NULL : next;
     162         966 :                 } else if (m->mnt_slave.next != &master->mnt_slave_list)
     163         828 :                         return next_slave(m);
     164             : 
     165             :                 /* back at master */
     166             :                 m = master;
     167             :         }
     168             : }
     169             : 
     170           0 : static struct mount *skip_propagation_subtree(struct mount *m,
     171             :                                                 struct mount *origin)
     172             : {
     173             :         /*
     174             :          * Advance m such that propagation_next will not return
     175             :          * the slaves of m.
     176             :          */
     177           0 :         if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
     178           0 :                 m = last_slave(m);
     179             : 
     180           0 :         return m;
     181             : }
     182             : 
     183         381 : static struct mount *next_group(struct mount *m, struct mount *origin)
     184             : {
     185         427 :         while (1) {
     186         427 :                 while (1) {
     187         427 :                         struct mount *next;
     188         427 :                         if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
     189          47 :                                 return first_slave(m);
     190         380 :                         next = next_peer(m);
     191         380 :                         if (m->mnt_group_id == origin->mnt_group_id) {
     192          48 :                                 if (next == origin)
     193             :                                         return NULL;
     194         332 :                         } else if (m->mnt_slave.next != &next->mnt_slave)
     195             :                                 break;
     196             :                         m = next;
     197             :                 }
     198             :                 /* m is the last peer */
     199         332 :                 while (1) {
     200         332 :                         struct mount *master = m->mnt_master;
     201         332 :                         if (m->mnt_slave.next != &master->mnt_slave_list)
     202         285 :                                 return next_slave(m);
     203          47 :                         m = next_peer(master);
     204          47 :                         if (master->mnt_group_id == origin->mnt_group_id)
     205             :                                 break;
     206           0 :                         if (master->mnt_slave.next == &m->mnt_slave)
     207             :                                 break;
     208             :                         m = master;
     209             :                 }
     210          47 :                 if (m == origin)
     211             :                         return NULL;
     212             :         }
     213             : }
     214             : 
     215             : /* all accesses are serialized by namespace_sem */
     216             : static struct mount *last_dest, *first_source, *last_source, *dest_master;
     217             : static struct mountpoint *mp;
     218             : static struct hlist_head *list;
     219             : 
     220         421 : static inline bool peers(struct mount *m1, struct mount *m2)
     221             : {
     222         233 :         return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
     223             : }
     224             : 
     225         378 : static int propagate_one(struct mount *m)
     226             : {
     227         378 :         struct mount *child;
     228         378 :         int type;
     229             :         /* skip ones added by this propagate_mnt() */
     230         378 :         if (IS_MNT_NEW(m))
     231             :                 return 0;
     232             :         /* skip if mountpoint isn't covered by it */
     233         378 :         if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
     234             :                 return 0;
     235         468 :         if (peers(m, last_dest)) {
     236             :                 type = CL_MAKE_SHARED;
     237             :         } else {
     238             :                 struct mount *n, *p;
     239             :                 bool done;
     240             :                 for (n = m; ; n = p) {
     241         235 :                         p = n->mnt_master;
     242         235 :                         if (p == dest_master || IS_MNT_MARKED(p))
     243             :                                 break;
     244             :                 }
     245         188 :                 do {
     246         188 :                         struct mount *parent = last_source->mnt_parent;
     247         188 :                         if (last_source == first_source)
     248             :                                 break;
     249         187 :                         done = parent->mnt_master == p;
     250         374 :                         if (done && peers(n, parent))
     251             :                                 break;
     252         141 :                         last_source = last_source->mnt_master;
     253         141 :                 } while (!done);
     254             : 
     255         188 :                 type = CL_SLAVE;
     256             :                 /* beginning of peer group among the slaves? */
     257         188 :                 if (IS_MNT_SHARED(m))
     258          94 :                         type |= CL_MAKE_SHARED;
     259             :         }
     260             :                 
     261         234 :         child = copy_tree(last_source, last_source->mnt.mnt_root, type);
     262         234 :         if (IS_ERR(child))
     263           0 :                 return PTR_ERR(child);
     264         234 :         read_seqlock_excl(&mount_lock);
     265         234 :         mnt_set_mountpoint(m, mp, child);
     266         234 :         if (m->mnt_master != dest_master)
     267         188 :                 SET_MNT_MARK(m->mnt_master);
     268         234 :         read_sequnlock_excl(&mount_lock);
     269         234 :         last_dest = m;
     270         234 :         last_source = child;
     271         234 :         hlist_add_head(&child->mnt_hash, list);
     272         234 :         return count_mounts(m->mnt_ns, child);
     273             : }
     274             : 
     275             : /*
      276             :  * Mount 'source_mnt' under the destination 'dest_mnt' at
      277             :  * mountpoint 'dest_mp', and propagate that mount to
      278             :  * all the peer and slave mounts of 'dest_mnt'.
     279             :  * Link all the new mounts into a propagation tree headed at
     280             :  * source_mnt. Also link all the new mounts using ->mnt_list
     281             :  * headed at source_mnt's ->mnt_list
     282             :  *
     283             :  * @dest_mnt: destination mount.
      284             :  * @dest_mp: destination mountpoint.
     285             :  * @source_mnt: source mount.
     286             :  * @tree_list : list of heads of trees to be attached.
     287             :  */
     288          49 : int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
     289             :                     struct mount *source_mnt, struct hlist_head *tree_list)
     290             : {
     291          49 :         struct mount *m, *n;
     292          49 :         int ret = 0;
     293             : 
     294             :         /*
     295             :          * we don't want to bother passing tons of arguments to
     296             :          * propagate_one(); everything is serialized by namespace_sem,
     297             :          * so globals will do just fine.
     298             :          */
     299          49 :         last_dest = dest_mnt;
     300          49 :         first_source = source_mnt;
     301          49 :         last_source = source_mnt;
     302          49 :         mp = dest_mp;
     303          49 :         list = tree_list;
     304          49 :         dest_master = dest_mnt->mnt_master;
     305             : 
     306             :         /* all peers of dest_mnt, except dest_mnt itself */
     307          95 :         for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
     308          46 :                 ret = propagate_one(n);
     309          46 :                 if (ret)
     310           0 :                         goto out;
     311             :         }
     312             : 
     313             :         /* all slave groups */
     314         381 :         for (m = next_group(dest_mnt, dest_mnt); m;
     315         332 :                         m = next_group(m, dest_mnt)) {
     316             :                 /* everything in that slave group */
     317             :                 n = m;
     318         332 :                 do {
     319         332 :                         ret = propagate_one(n);
     320         332 :                         if (ret)
     321           0 :                                 goto out;
     322         332 :                         n = next_peer(n);
     323         332 :                 } while (n != m);
     324             :         }
     325          49 : out:
     326          49 :         read_seqlock_excl(&mount_lock);
     327         519 :         hlist_for_each_entry(n, tree_list, mnt_hash) {
     328         234 :                 m = n->mnt_parent;
     329         234 :                 if (m->mnt_master != dest_mnt->mnt_master)
     330         188 :                         CLEAR_MNT_MARK(m->mnt_master);
     331             :         }
     332          49 :         read_sequnlock_excl(&mount_lock);
     333          49 :         return ret;
     334             : }
     335             : 
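
The effect of propagate_mnt() is visible from userspace: once two mounts are peers, a mount created under one of them appears under the other as well. A minimal sketch, assuming empty scratch directories /mnt/a and /mnt/b, root privileges, and nothing already mounted on them (all paths are illustrative):

/* Build a two-member peer group, then watch a new mount propagate to it. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        int fd;

        /* Mount a tmpfs at /mnt/a, mark it shared, and bind it to /mnt/b;
         * binding a shared mount puts the new mount in the same peer group. */
        if (mount("none", "/mnt/a", "tmpfs", 0, NULL) ||
            mount(NULL, "/mnt/a", NULL, MS_SHARED, NULL) ||
            mount("/mnt/a", "/mnt/b", NULL, MS_BIND, NULL)) {
                perror("peer group setup");
                return 1;
        }

        /* Create a mountpoint inside the shared tmpfs and mount on it;
         * propagate_mnt() clones this new mount onto the peer at /mnt/b/sub. */
        if (mkdir("/mnt/a/sub", 0755) && errno != EEXIST) {
                perror("mkdir");
                return 1;
        }
        if (mount("none", "/mnt/a/sub", "tmpfs", 0, NULL)) {
                perror("mount /mnt/a/sub");
                return 1;
        }

        /* The probe file is reachable through the peer because the propagated
         * copy is a mount of the same filesystem instance. */
        fd = creat("/mnt/a/sub/probe", 0644);
        if (fd >= 0)
                close(fd);
        printf("peer copy at /mnt/b/sub: %s\n",
               access("/mnt/b/sub/probe", F_OK) == 0 ? "present" : "missing");
        return 0;
}
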
     336         230 : static struct mount *find_topper(struct mount *mnt)
     337             : {
     338             :         /* If there is exactly one mount covering mnt completely return it. */
     339         230 :         struct mount *child;
     340             : 
     341         230 :         if (!list_is_singular(&mnt->mnt_mounts))
     342             :                 return NULL;
     343             : 
     344           0 :         child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
     345           0 :         if (child->mnt_mountpoint != mnt->mnt.mnt_root)
     346           0 :                 return NULL;
     347             : 
     348             :         return child;
     349             : }
     350             : 
     351             : /*
     352             :  * return true if the refcount is greater than count
     353             :  */
     354         335 : static inline int do_refcount_check(struct mount *mnt, int count)
     355             : {
     356         105 :         return mnt_get_count(mnt) > count;
     357             : }
     358             : 
     359             : /*
     360             :  * check if the mount 'mnt' can be unmounted successfully.
     361             :  * @mnt: the mount to be checked for unmount
     362             :  * NOTE: unmounting 'mnt' would naturally propagate to all
     363             :  * other mounts its parent propagates to.
     364             :  * Check if any of these mounts that **do not have submounts**
     365             :  * have more references than 'refcnt'. If so return busy.
     366             :  *
     367             :  * vfsmount lock must be held for write
     368             :  */
     369         111 : int propagate_mount_busy(struct mount *mnt, int refcnt)
     370             : {
     371         111 :         struct mount *m, *child, *topper;
     372         111 :         struct mount *parent = mnt->mnt_parent;
     373             : 
     374         111 :         if (mnt == parent)
     375           0 :                 return do_refcount_check(mnt, refcnt);
     376             : 
     377             :         /*
     378             :          * quickly check if the current mount can be unmounted.
     379             :          * If not, we don't have to go checking for all other
     380             :          * mounts
     381             :          */
     382         216 :         if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
     383           6 :                 return 1;
     384             : 
     385         473 :         for (m = propagation_next(parent, parent); m;
     386         368 :                         m = propagation_next(m, parent)) {
     387         368 :                 int count = 1;
     388         368 :                 child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
     389         368 :                 if (!child)
     390         138 :                         continue;
     391             : 
     392             :                 /* Is there exactly one mount on the child that covers
     393             :                  * it completely whose reference should be ignored?
     394             :                  */
     395         230 :                 topper = find_topper(child);
     396         230 :                 if (topper)
     397             :                         count += 1;
     398         230 :                 else if (!list_empty(&child->mnt_mounts))
     399           0 :                         continue;
     400             : 
     401         230 :                 if (do_refcount_check(child, count))
     402             :                         return 1;
     403             :         }
     404             :         return 0;
     405             : }
     406             : 
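
One userspace consequence of this check: a plain (non-lazy) umount fails with EBUSY when any of the propagated copies examined here is pinned, for example by an open file. A short sketch that assumes the peer-group layout from the previous example (tmpfs mounted at /mnt/a/sub, propagated copy at /mnt/b/sub, probe file inside it):

/* Pin the propagated copy and watch the umount of the original fail. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mount.h>
#include <unistd.h>

int main(void)
{
        /* Holding a file open on the peer's copy keeps its refcount raised. */
        int fd = open("/mnt/b/sub/probe", O_RDONLY);

        if (fd < 0) {
                perror("open /mnt/b/sub/probe");
                return 1;
        }
        /* propagate_mount_busy() sees the pinned copy, so the non-lazy umount
         * of the original mount is refused; expect "Device or resource busy". */
        if (umount("/mnt/a/sub"))
                perror("umount /mnt/a/sub");
        close(fd);
        return 0;
}
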
     407             : /*
     408             :  * Clear MNT_LOCKED when it can be shown to be safe.
     409             :  *
     410             :  * mount_lock lock must be held for write
     411             :  */
     412         106 : void propagate_mount_unlock(struct mount *mnt)
     413             : {
     414         106 :         struct mount *parent = mnt->mnt_parent;
     415         106 :         struct mount *m, *child;
     416             : 
     417         106 :         BUG_ON(parent == mnt);
     418             : 
     419         474 :         for (m = propagation_next(parent, parent); m;
     420         368 :                         m = propagation_next(m, parent)) {
     421         368 :                 child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
     422         368 :                 if (child)
     423         230 :                         child->mnt.mnt_flags &= ~MNT_LOCKED;
     424             :         }
     425         106 : }
     426             : 
     427         230 : static void umount_one(struct mount *mnt, struct list_head *to_umount)
     428             : {
     429         230 :         CLEAR_MNT_MARK(mnt);
     430         230 :         mnt->mnt.mnt_flags |= MNT_UMOUNT;
     431         230 :         list_del_init(&mnt->mnt_child);
     432         230 :         list_del_init(&mnt->mnt_umounting);
     433         230 :         list_move_tail(&mnt->mnt_list, to_umount);
     434         230 : }
     435             : 
     436             : /*
     437             :  * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
     438             :  * parent propagates to.
     439             :  */
     440         230 : static bool __propagate_umount(struct mount *mnt,
     441             :                                struct list_head *to_umount,
     442             :                                struct list_head *to_restore)
     443             : {
     444         230 :         bool progress = false;
     445         230 :         struct mount *child;
     446             : 
     447             :         /*
     448             :          * The state of the parent won't change if this mount is
     449             :          * already unmounted or marked as without children.
     450             :          */
     451         230 :         if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
     452           0 :                 goto out;
     453             : 
     454             :         /* Verify topper is the only grandchild that has not been
     455             :          * speculatively unmounted.
     456             :          */
     457         230 :         list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
     458           0 :                 if (child->mnt_mountpoint == mnt->mnt.mnt_root)
     459           0 :                         continue;
     460           0 :                 if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
     461           0 :                         continue;
     462             :                 /* Found a mounted child */
     463           0 :                 goto children;
     464             :         }
     465             : 
     466             :         /* Mark mounts that can be unmounted if not locked */
     467         230 :         SET_MNT_MARK(mnt);
     468         230 :         progress = true;
     469             : 
     470             :         /* If a mount is without children and not locked umount it. */
     471         230 :         if (!IS_MNT_LOCKED(mnt)) {
     472         230 :                 umount_one(mnt, to_umount);
     473             :         } else {
     474           0 : children:
     475           0 :                 list_move_tail(&mnt->mnt_umounting, to_restore);
     476             :         }
     477         230 : out:
     478         230 :         return progress;
     479             : }
     480             : 
     481         106 : static void umount_list(struct list_head *to_umount,
     482             :                         struct list_head *to_restore)
     483             : {
     484         106 :         struct mount *mnt, *child, *tmp;
     485         336 :         list_for_each_entry(mnt, to_umount, mnt_list) {
     486         230 :                 list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
     487             :                         /* topper? */
     488           0 :                         if (child->mnt_mountpoint == mnt->mnt.mnt_root)
     489           0 :                                 list_move_tail(&child->mnt_umounting, to_restore);
     490             :                         else
     491           0 :                                 umount_one(child, to_umount);
     492             :                 }
     493             :         }
     494         106 : }
     495             : 
     496         106 : static void restore_mounts(struct list_head *to_restore)
     497             : {
     498             :         /* Restore mounts to a clean working state */
     499         212 :         while (!list_empty(to_restore)) {
     500           0 :                 struct mount *mnt, *parent;
     501           0 :                 struct mountpoint *mp;
     502             : 
     503           0 :                 mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
     504           0 :                 CLEAR_MNT_MARK(mnt);
     505           0 :                 list_del_init(&mnt->mnt_umounting);
     506             : 
     507             :                 /* Should this mount be reparented? */
     508           0 :                 mp = mnt->mnt_mp;
     509           0 :                 parent = mnt->mnt_parent;
     510           0 :                 while (parent->mnt.mnt_flags & MNT_UMOUNT) {
     511           0 :                         mp = parent->mnt_mp;
     512           0 :                         parent = parent->mnt_parent;
     513             :                 }
     514           0 :                 if (parent != mnt->mnt_parent)
     515           0 :                         mnt_change_mountpoint(parent, mp, mnt);
     516             :         }
     517         106 : }
     518             : 
     519         106 : static void cleanup_umount_visitations(struct list_head *visited)
     520             : {
     521         106 :         while (!list_empty(visited)) {
     522         106 :                 struct mount *mnt =
     523         106 :                         list_first_entry(visited, struct mount, mnt_umounting);
     524         212 :                 list_del_init(&mnt->mnt_umounting);
     525             :         }
     526         106 : }
     527             : 
     528             : /*
     529             :  * collect all mounts that receive propagation from the mount in @list,
     530             :  * and return these additional mounts in the same list.
     531             :  * @list: the list of mounts to be unmounted.
     532             :  *
     533             :  * vfsmount lock must be held for write
     534             :  */
     535         106 : int propagate_umount(struct list_head *list)
     536             : {
     537         106 :         struct mount *mnt;
     538         106 :         LIST_HEAD(to_restore);
     539         106 :         LIST_HEAD(to_umount);
     540         106 :         LIST_HEAD(visited);
     541             : 
     542             :         /* Find candidates for unmounting */
     543         212 :         list_for_each_entry_reverse(mnt, list, mnt_list) {
     544         106 :                 struct mount *parent = mnt->mnt_parent;
     545         106 :                 struct mount *m;
     546             : 
     547             :                 /*
      548             :                  * If this mount has already been visited, it is known that its
      549             :                  * entire peer group and all of its slaves in the propagation
      550             :                  * tree for the mountpoint have already been visited and there is
     551             :                  * no need to visit them again.
     552             :                  */
     553         106 :                 if (!list_empty(&mnt->mnt_umounting))
     554           0 :                         continue;
     555             : 
     556         106 :                 list_add_tail(&mnt->mnt_umounting, &visited);
     557         474 :                 for (m = propagation_next(parent, parent); m;
     558         368 :                      m = propagation_next(m, parent)) {
     559         368 :                         struct mount *child = __lookup_mnt(&m->mnt,
     560             :                                                            mnt->mnt_mountpoint);
     561         368 :                         if (!child)
     562         138 :                                 continue;
     563             : 
     564         230 :                         if (!list_empty(&child->mnt_umounting)) {
     565             :                                 /*
      566             :                                  * If the child has already been visited, it is
      567             :                                  * known that its entire peer group and all of
      568             :                                  * its slaves in the propagation tree for the
      569             :                                  * mountpoint have already been visited and there
      570             :                                  * is no need to visit this subtree again.
     571             :                                  */
     572           0 :                                 m = skip_propagation_subtree(m, parent);
     573           0 :                                 continue;
     574         230 :                         } else if (child->mnt.mnt_flags & MNT_UMOUNT) {
     575             :                                 /*
      576             :                                  * We have come across a partially unmounted
      577             :                                  * mount in the list that has not been visited yet.
     578             :                                  * Remember it has been visited and continue
     579             :                                  * about our merry way.
     580             :                                  */
     581           0 :                                 list_add_tail(&child->mnt_umounting, &visited);
     582           0 :                                 continue;
     583             :                         }
     584             : 
     585             :                         /* Check the child and parents while progress is made */
     586         230 :                         while (__propagate_umount(child,
     587             :                                                   &to_umount, &to_restore)) {
     588             :                                 /* Is the parent a umount candidate? */
     589         230 :                                 child = child->mnt_parent;
     590         230 :                                 if (list_empty(&child->mnt_umounting))
     591             :                                         break;
     592             :                         }
     593             :                 }
     594             :         }
     595             : 
     596         106 :         umount_list(&to_umount, &to_restore);
     597         106 :         restore_mounts(&to_restore);
     598         106 :         cleanup_umount_visitations(&visited);
     599         106 :         list_splice_tail(&to_umount, list);
     600             : 
     601         106 :         return 0;
     602             : }
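
Seen from userspace, propagate_umount() is why unmounting a mount also removes the copies that propagation created under its parent's peers. A final sketch, again assuming the layout built in the earlier examples and root privileges:

/* Unmount the original; the propagated copy on the peer goes away too. */
#include <stdio.h>
#include <sys/mount.h>
#include <unistd.h>

int main(void)
{
        if (umount("/mnt/a/sub")) {
                perror("umount /mnt/a/sub");
                return 1;
        }
        /* The copy at /mnt/b/sub is torn down by umount propagation, so the
         * probe file inside that tmpfs is no longer reachable. */
        printf("peer copy at /mnt/b/sub: %s\n",
               access("/mnt/b/sub/probe", F_OK) == 0 ? "still present" : "gone");
        return 0;
}
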

Generated by: LCOV version 1.14