LCOV - code coverage report
Current view: top level - kernel/rcu - rcu_segcblist.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                Hit    Total    Coverage
Lines:          131    206      63.6 %
Functions:      14     26       53.8 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0+
       2             : /*
       3             :  * RCU segmented callback lists, function definitions
       4             :  *
       5             :  * Copyright IBM Corporation, 2017
       6             :  *
       7             :  * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
       8             :  */
       9             : 
      10             : #include <linux/cpu.h>
      11             : #include <linux/interrupt.h>
      12             : #include <linux/kernel.h>
      13             : #include <linux/types.h>
      14             : 
      15             : #include "rcu_segcblist.h"
      16             : 
      17             : /* Initialize simple callback list. */
      18          60 : void rcu_cblist_init(struct rcu_cblist *rclp)
      19             : {
      20          60 :         rclp->head = NULL;
      21          60 :         rclp->tail = &rclp->head;
      22          60 :         rclp->len = 0;
      23          60 : }
      24             : 
      25             : /*
      26             :  * Enqueue an rcu_head structure onto the specified callback list.
      27             :  */
      28           0 : void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp)
      29             : {
      30           0 :         *rclp->tail = rhp;
      31           0 :         rclp->tail = &rhp->next;
      32           0 :         WRITE_ONCE(rclp->len, rclp->len + 1);
      33           0 : }
      34             : 
      35             : /*
      36             :  * Flush the second rcu_cblist structure onto the first one, obliterating
      37             :  * any contents of the first.  If rhp is non-NULL, enqueue it as the sole
       38             :  * element of the second rcu_cblist structure, while ensuring that the
       39             :  * second rcu_cblist structure, if initially non-empty, always appears
       40             :  * non-empty throughout the process.  If rhp is NULL, the second
       41             :  * rcu_cblist structure is instead initialized to empty.
      42             :  */
      43           0 : void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
      44             :                               struct rcu_cblist *srclp,
      45             :                               struct rcu_head *rhp)
      46             : {
      47           0 :         drclp->head = srclp->head;
      48           0 :         if (drclp->head)
      49           0 :                 drclp->tail = srclp->tail;
      50             :         else
      51           0 :                 drclp->tail = &drclp->head;
      52           0 :         drclp->len = srclp->len;
      53           0 :         if (!rhp) {
      54           0 :                 rcu_cblist_init(srclp);
      55             :         } else {
      56           0 :                 rhp->next = NULL;
      57           0 :                 srclp->head = rhp;
      58           0 :                 srclp->tail = &rhp->next;
      59           0 :                 WRITE_ONCE(srclp->len, 1);
      60             :         }
      61           0 : }
      62             : 
      63             : /*
      64             :  * Dequeue the oldest rcu_head structure from the specified callback
      65             :  * list.
      66             :  */
      67      630749 : struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp)
      68             : {
      69      630749 :         struct rcu_head *rhp;
      70             : 
      71      630749 :         rhp = rclp->head;
      72      630749 :         if (!rhp)
      73             :                 return NULL;
      74      625773 :         rclp->len--;
      75      625773 :         rclp->head = rhp->next;
      76      625773 :         if (!rclp->head)
      77        5260 :                 rclp->tail = &rclp->head;
      78             :         return rhp;
      79             : }
      80             : 
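The rcu_cblist functions above all use the tail-pointer-to-pointer idiom:
->tail always addresses the list's terminating ->next slot, or ->head itself
when the list is empty, so enqueuing needs no empty-list special case.  A
minimal userspace sketch of the same idiom (the names are illustrative, not
the kernel's):

        #include <stddef.h>

        struct node {
                struct node *next;
        };

        struct cblist {
                struct node *head;
                struct node **tail;     /* addresses the final ->next slot */
                long len;
        };

        static void cblist_init(struct cblist *l)
        {
                l->head = NULL;
                l->tail = &l->head;     /* empty: tail addresses head itself */
                l->len = 0;
        }

        static void cblist_enqueue(struct cblist *l, struct node *n)
        {
                n->next = NULL;
                *l->tail = n;           /* works even when the list is empty */
                l->tail = &n->next;
                l->len++;
        }

        static struct node *cblist_dequeue(struct cblist *l)
        {
                struct node *n = l->head;

                if (!n)
                        return NULL;
                l->len--;
                l->head = n->next;
                if (!l->head)
                        l->tail = &l->head;     /* went empty: reset tail */
                return n;
        }
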
      81             : /* Set the length of an rcu_segcblist structure. */
      82          16 : static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v)
      83             : {
      84             : #ifdef CONFIG_RCU_NOCB_CPU
      85             :         atomic_long_set(&rsclp->len, v);
      86             : #else
      87          16 :         WRITE_ONCE(rsclp->len, v);
      88             : #endif
      89             : }
      90             : 
      91             : /* Get the length of a segment of the rcu_segcblist structure. */
      92      274182 : static long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg)
      93             : {
      94      274182 :         return READ_ONCE(rsclp->seglen[seg]);
      95             : }
      96             : 
      97             : /* Return number of callbacks in segmented callback list by summing seglen. */
      98       48808 : long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp)
      99             : {
     100       48808 :         long len = 0;
     101       48808 :         int i;
     102             : 
     103      244031 :         for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
     104      195223 :                 len += rcu_segcblist_get_seglen(rsclp, i);
     105             : 
     106       48808 :         return len;
     107             : }
     108             : 
     109             : /* Set the length of a segment of the rcu_segcblist structure. */
     110       69799 : static void rcu_segcblist_set_seglen(struct rcu_segcblist *rsclp, int seg, long v)
     111             : {
     112       69799 :         WRITE_ONCE(rsclp->seglen[seg], v);
     113       69735 : }
     114             : 
     115             : /* Increase the numeric length of a segment by a specified amount. */
     116      693241 : static void rcu_segcblist_add_seglen(struct rcu_segcblist *rsclp, int seg, long v)
     117             : {
     118      693241 :         WRITE_ONCE(rsclp->seglen[seg], rsclp->seglen[seg] + v);
     119             : }
     120             : 
      121             : /* Transfer the length of the "from" segment to the "to" segment. */
     122       30082 : static void rcu_segcblist_move_seglen(struct rcu_segcblist *rsclp, int from, int to)
     123             : {
     124       30082 :         long len;
     125             : 
     126       21712 :         if (from == to)
     127             :                 return;
     128             : 
     129       30082 :         len = rcu_segcblist_get_seglen(rsclp, from);
     130       21712 :         if (!len)
     131             :                 return;
     132             : 
     133       20858 :         rcu_segcblist_add_seglen(rsclp, to, len);
     134       20858 :         rcu_segcblist_set_seglen(rsclp, from, 0);
     135             : }
     136             : 
      137             : /* Increment a segment's length. */
     138      628788 : static void rcu_segcblist_inc_seglen(struct rcu_segcblist *rsclp, int seg)
     139             : {
     140      628788 :         rcu_segcblist_add_seglen(rsclp, seg, 1);
     141             : }
     142             : 
     143             : /*
     144             :  * Increase the numeric length of an rcu_segcblist structure by the
     145             :  * specified amount, which can be negative.  This can cause the ->len
     146             :  * field to disagree with the actual number of callbacks on the structure.
      147             :  * This increase is fully ordered with respect to the caller's accesses
     148             :  * both before and after.
     149             :  *
     150             :  * So why on earth is a memory barrier required both before and after
     151             :  * the update to the ->len field???
     152             :  *
     153             :  * The reason is that rcu_barrier() locklessly samples each CPU's ->len
     154             :  * field, and if a given CPU's field is zero, avoids IPIing that CPU.
     155             :  * This can of course race with both queuing and invoking of callbacks.
     156             :  * Failing to correctly handle either of these races could result in
     157             :  * rcu_barrier() failing to IPI a CPU that actually had callbacks queued
     158             :  * which rcu_barrier() was obligated to wait on.  And if rcu_barrier()
     159             :  * failed to wait on such a callback, unloading certain kernel modules
     160             :  * would result in calls to functions whose code was no longer present in
     161             :  * the kernel, for but one example.
     162             :  *
     163             :  * Therefore, ->len transitions from 1->0 and 0->1 have to be carefully
      164             :  * ordered with respect to both list modifications and the rcu_barrier().
     165             :  *
     166             :  * The queuing case is CASE 1 and the invoking case is CASE 2.
     167             :  *
     168             :  * CASE 1: Suppose that CPU 0 has no callbacks queued, but invokes
     169             :  * call_rcu() just as CPU 1 invokes rcu_barrier().  CPU 0's ->len field
     170             :  * will transition from 0->1, which is one of the transitions that must
     171             :  * be handled carefully.  Without the full memory barriers after the ->len
     172             :  * update and at the beginning of rcu_barrier(), the following could happen:
     173             :  *
     174             :  * CPU 0                                CPU 1
     175             :  *
     176             :  * call_rcu().
     177             :  *                                      rcu_barrier() sees ->len as 0.
     178             :  * set ->len = 1.
     179             :  *                                      rcu_barrier() does nothing.
     180             :  *                                      module is unloaded.
     181             :  * callback invokes unloaded function!
     182             :  *
     183             :  * With the full barriers, any case where rcu_barrier() sees ->len as 0 will
     184             :  * have unambiguously preceded the return from the racing call_rcu(), which
     185             :  * means that this call_rcu() invocation is OK to not wait on.  After all,
     186             :  * you are supposed to make sure that any problematic call_rcu() invocations
     187             :  * happen before the rcu_barrier().
     188             :  *
     189             :  *
     190             :  * CASE 2: Suppose that CPU 0 is invoking its last callback just as
     191             :  * CPU 1 invokes rcu_barrier().  CPU 0's ->len field will transition from
     192             :  * 1->0, which is one of the transitions that must be handled carefully.
     193             :  * Without the full memory barriers before the ->len update and at the
     194             :  * end of rcu_barrier(), the following could happen:
     195             :  *
     196             :  * CPU 0                                CPU 1
     197             :  *
     198             :  * start invoking last callback
     199             :  * set ->len = 0 (reordered)
     200             :  *                                      rcu_barrier() sees ->len as 0
     201             :  *                                      rcu_barrier() does nothing.
     202             :  *                                      module is unloaded
     203             :  * callback executing after unloaded!
     204             :  *
     205             :  * With the full barriers, any case where rcu_barrier() sees ->len as 0
     206             :  * will be fully ordered after the completion of the callback function,
     207             :  * so that the module unloading operation is completely safe.
     208             :  *
     209             :  */
     210      677699 : void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v)
     211             : {
     212             : #ifdef CONFIG_RCU_NOCB_CPU
     213             :         smp_mb__before_atomic(); // Read header comment above.
     214             :         atomic_long_add(v, &rsclp->len);
     215             :         smp_mb__after_atomic();  // Read header comment above.
     216             : #else
     217       48864 :         smp_mb(); // Read header comment above.
     218      677868 :         WRITE_ONCE(rsclp->len, rsclp->len + v);
     219      677868 :         smp_mb(); // Read header comment above.
     220             : #endif
     221       48879 : }
     222             : 
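As a rough userspace analogue of the barrier pairing described above, and
only as a sketch using C11 atomics rather than the kernel's primitives: the
updater brackets its length store with full fences, and the barrier side
issues a full fence before sampling.

        #include <stdatomic.h>
        #include <stdbool.h>

        static atomic_long list_len;    /* stands in for rsclp->len */

        /* Updater side, mirroring rcu_segcblist_add_len(). */
        static void sketch_add_len(long v)
        {
                atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
                atomic_store_explicit(&list_len,
                        atomic_load_explicit(&list_len, memory_order_relaxed) + v,
                        memory_order_relaxed);
                atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        }

        /* Barrier side: a zero sample means this list need not be waited on. */
        static bool sketch_need_wait(void)
        {
                atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
                return atomic_load_explicit(&list_len, memory_order_relaxed) != 0;
        }
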
     223             : /*
     224             :  * Increase the numeric length of an rcu_segcblist structure by one.
     225             :  * This can cause the ->len field to disagree with the actual number of
     226             :  * callbacks on the structure.  This increase is fully ordered with respect
      227             :  * to the caller's accesses both before and after.
     228             :  */
     229      628835 : void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp)
     230             : {
     231           0 :         rcu_segcblist_add_len(rsclp, 1);
     232           0 : }
     233             : 
     234             : /*
     235             :  * Initialize an rcu_segcblist structure.
     236             :  */
     237          16 : void rcu_segcblist_init(struct rcu_segcblist *rsclp)
     238             : {
     239          16 :         int i;
     240             : 
     241          16 :         BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq));
     242          16 :         BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq));
     243          16 :         rsclp->head = NULL;
     244          80 :         for (i = 0; i < RCU_CBLIST_NSEGS; i++) {
     245          64 :                 rsclp->tails[i] = &rsclp->head;
     246          64 :                 rcu_segcblist_set_seglen(rsclp, i, 0);
     247             :         }
     248          16 :         rcu_segcblist_set_len(rsclp, 0);
     249          16 :         rcu_segcblist_set_flags(rsclp, SEGCBLIST_ENABLED);
     250          16 : }
     251             : 
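Initialization establishes the structure's central invariant: ->head is
NULL, every ->tails[] entry addresses ->head itself, and every per-segment
count is zero.  More generally, a segment is empty exactly when its tail
equals the previous segment's tail.  A hypothetical standalone check over a
simplified mirror of the structure (the real definition lives in
rcu_segcblist.h):

        #include <assert.h>
        #include <stddef.h>

        #define RCU_DONE_TAIL           0       /* ready to invoke */
        #define RCU_WAIT_TAIL           1       /* waiting for current GP */
        #define RCU_NEXT_READY_TAIL     2       /* waiting for next GP */
        #define RCU_NEXT_TAIL           3       /* not yet assigned a GP */
        #define RCU_CBLIST_NSEGS        4

        struct head_sketch {
                struct head_sketch *next;
        };

        struct segcblist_sketch {
                struct head_sketch *head;
                struct head_sketch **tails[RCU_CBLIST_NSEGS];
                long seglen[RCU_CBLIST_NSEGS];
        };

        /* Hypothetical helper: verify the freshly initialized (empty) state. */
        static void check_init_state(struct segcblist_sketch *s)
        {
                int i;

                assert(s->head == NULL);
                for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) {
                        assert(s->tails[i] == &s->head);
                        assert(s->seglen[i] == 0);
                }
        }
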
     252             : /*
     253             :  * Disable the specified rcu_segcblist structure, so that callbacks can
     254             :  * no longer be posted to it.  This structure must be empty.
     255             :  */
     256           0 : void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
     257             : {
     258           0 :         WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
     259           0 :         WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
     260           0 :         rcu_segcblist_clear_flags(rsclp, SEGCBLIST_ENABLED);
     261           0 : }
     262             : 
     263             : /*
     264             :  * Mark the specified rcu_segcblist structure as offloaded.  This
     265             :  * structure must be empty.
     266             :  */
     267           0 : void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload)
     268             : {
     269           0 :         if (offload) {
     270           0 :                 rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY);
     271           0 :                 rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED);
     272             :         } else {
     273           0 :                 rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED);
     274             :         }
     275           0 : }
     276             : 
     277             : /*
     278             :  * Does the specified rcu_segcblist structure contain callbacks that
     279             :  * are ready to be invoked?
     280             :  */
     281      222950 : bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp)
     282             : {
     283      174077 :         return rcu_segcblist_is_enabled(rsclp) &&
     284      222878 :                &rsclp->head != READ_ONCE(rsclp->tails[RCU_DONE_TAIL]);
     285             : }
     286             : 
     287             : /*
     288             :  * Does the specified rcu_segcblist structure contain callbacks that
     289             :  * are still pending, that is, not yet ready to be invoked?
     290             :  */
     291       26430 : bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp)
     292             : {
     293       26430 :         return rcu_segcblist_is_enabled(rsclp) &&
     294       26430 :                !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL);
     295             : }
     296             : 
     297             : /*
     298             :  * Return a pointer to the first callback in the specified rcu_segcblist
     299             :  * structure.  This is useful for diagnostics.
     300             :  */
     301           0 : struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp)
     302             : {
     303           0 :         if (rcu_segcblist_is_enabled(rsclp))
     304           0 :                 return rsclp->head;
     305             :         return NULL;
     306             : }
     307             : 
     308             : /*
     309             :  * Return a pointer to the first pending callback in the specified
     310             :  * rcu_segcblist structure.  This is useful just after posting a given
     311             :  * callback -- if that callback is the first pending callback, then
     312             :  * you cannot rely on someone else having already started up the required
     313             :  * grace period.
     314             :  */
     315           0 : struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp)
     316             : {
     317           0 :         if (rcu_segcblist_is_enabled(rsclp))
     318           0 :                 return *rsclp->tails[RCU_DONE_TAIL];
     319             :         return NULL;
     320             : }
     321             : 
     322             : /*
     323             :  * Return false if there are no CBs awaiting grace periods, otherwise,
     324             :  * return true and store the nearest waited-upon grace period into *lp.
     325             :  */
     326           0 : bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp)
     327             : {
     328           0 :         if (!rcu_segcblist_pend_cbs(rsclp))
     329             :                 return false;
     330           0 :         *lp = rsclp->gp_seq[RCU_WAIT_TAIL];
     331           0 :         return true;
     332             : }
     333             : 
     334             : /*
     335             :  * Enqueue the specified callback onto the specified rcu_segcblist
     336             :  * structure, updating accounting as needed.  Note that the ->len
     337             :  * field may be accessed locklessly, hence the WRITE_ONCE().
     338             :  * The ->len field is used by rcu_barrier() and friends to determine
     339             :  * if it must post a callback on this structure, and it is OK
     340             :  * for rcu_barrier() to sometimes post callbacks needlessly, but
     341             :  * absolutely not OK for it to ever miss posting a callback.
     342             :  */
     343      628833 : void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
     344             :                            struct rcu_head *rhp)
     345             : {
     346      628833 :         rcu_segcblist_inc_len(rsclp);
     347      628786 :         rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL);
     348      628786 :         rhp->next = NULL;
     349      628786 :         WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp);
     350      628786 :         WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next);
     351      628786 : }
     352             : 
     353             : /*
     354             :  * Entrain the specified callback onto the specified rcu_segcblist at
     355             :  * the end of the last non-empty segment.  If the entire rcu_segcblist
     356             :  * is empty, make no change, but return false.
     357             :  *
     358             :  * This is intended for use by rcu_barrier()-like primitives, -not-
     359             :  * for normal grace-period use.  IMPORTANT:  The callback you enqueue
     360             :  * will wait for all prior callbacks, NOT necessarily for a grace
     361             :  * period.  You have been warned.
     362             :  */
     363           2 : bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
     364             :                            struct rcu_head *rhp)
     365             : {
     366           2 :         int i;
     367             : 
     368           2 :         if (rcu_segcblist_n_cbs(rsclp) == 0)
     369             :                 return false;
     370           2 :         rcu_segcblist_inc_len(rsclp);
     371           2 :         smp_mb(); /* Ensure counts are updated before callback is entrained. */
     372           2 :         rhp->next = NULL;
     373           4 :         for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
     374           4 :                 if (rsclp->tails[i] != rsclp->tails[i - 1])
     375             :                         break;
     376           2 :         rcu_segcblist_inc_seglen(rsclp, i);
     377           2 :         WRITE_ONCE(*rsclp->tails[i], rhp);
     378           6 :         for (; i <= RCU_NEXT_TAIL; i++)
     379           4 :                 WRITE_ONCE(rsclp->tails[i], &rhp->next);
     380             :         return true;
     381             : }
     382             : 
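For context, rcu_barrier() is the main entraining caller: it posts its
per-CPU wakeup callback behind whatever callbacks are already queued,
counting one outstanding callback for each CPU that actually had some.
Roughly, with locking and tracing elided (modeled on kernel/rcu/tree.c, so
a sketch rather than a verbatim quote):

        /* Sketch: entrain rcu_barrier()'s callback behind existing CBs. */
        rdp->barrier_head.func = rcu_barrier_callback;
        debug_rcu_head_queue(&rdp->barrier_head);
        if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
                atomic_inc(&rcu_state.barrier_cpu_count);
        } else {
                /* List was empty: nothing to wait for on this CPU. */
                debug_rcu_head_unqueue(&rdp->barrier_head);
        }
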
     383             : /*
     384             :  * Extract only those callbacks ready to be invoked from the specified
     385             :  * rcu_segcblist structure and place them in the specified rcu_cblist
     386             :  * structure.
     387             :  */
     388       48873 : void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
     389             :                                     struct rcu_cblist *rclp)
     390             : {
     391       48873 :         int i;
     392             : 
     393       97746 :         if (!rcu_segcblist_ready_cbs(rsclp))
     394             :                 return; /* Nothing to do. */
     395       48877 :         rclp->len = rcu_segcblist_get_seglen(rsclp, RCU_DONE_TAIL);
     396       48877 :         *rclp->tail = rsclp->head;
     397       48877 :         WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]);
     398       48877 :         WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
     399       48877 :         rclp->tail = rsclp->tails[RCU_DONE_TAIL];
     400      244384 :         for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
     401      195507 :                 if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL])
     402      195507 :                         WRITE_ONCE(rsclp->tails[i], &rsclp->head);
     403       48877 :         rcu_segcblist_set_seglen(rsclp, RCU_DONE_TAIL, 0);
     404             : }
     405             : 
     406             : /*
     407             :  * Extract only those callbacks still pending (not yet ready to be
     408             :  * invoked) from the specified rcu_segcblist structure and place them in
     409             :  * the specified rcu_cblist structure.  Note that this loses information
     410             :  * about any callbacks that might have been partway done waiting for
     411             :  * their grace period.  Too bad!  They will have to start over.
     412             :  */
     413           0 : void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
     414             :                                     struct rcu_cblist *rclp)
     415             : {
     416           0 :         int i;
     417             : 
     418           0 :         if (!rcu_segcblist_pend_cbs(rsclp))
     419             :                 return; /* Nothing to do. */
     420           0 :         rclp->len = 0;
     421           0 :         *rclp->tail = *rsclp->tails[RCU_DONE_TAIL];
     422           0 :         rclp->tail = rsclp->tails[RCU_NEXT_TAIL];
     423           0 :         WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
     424           0 :         for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) {
     425           0 :                 rclp->len += rcu_segcblist_get_seglen(rsclp, i);
     426           0 :                 WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_DONE_TAIL]);
     427           0 :                 rcu_segcblist_set_seglen(rsclp, i, 0);
     428             :         }
     429             : }
     430             : 
     431             : /*
     432             :  * Insert counts from the specified rcu_cblist structure in the
     433             :  * specified rcu_segcblist structure.
     434             :  */
     435           0 : void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
     436             :                                 struct rcu_cblist *rclp)
     437             : {
     438           0 :         rcu_segcblist_add_len(rsclp, rclp->len);
     439           0 : }
     440             : 
     441             : /*
     442             :  * Move callbacks from the specified rcu_cblist to the beginning of the
     443             :  * done-callbacks segment of the specified rcu_segcblist.
     444             :  */
     445       48797 : void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
     446             :                                    struct rcu_cblist *rclp)
     447             : {
     448       48797 :         int i;
     449             : 
     450       48797 :         if (!rclp->head)
     451             :                 return; /* No callbacks to move. */
     452       43595 :         rcu_segcblist_add_seglen(rsclp, RCU_DONE_TAIL, rclp->len);
     453       43595 :         *rclp->tail = rsclp->head;
     454       43595 :         WRITE_ONCE(rsclp->head, rclp->head);
     455       88993 :         for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
     456       88629 :                 if (&rsclp->head == rsclp->tails[i])
     457       45398 :                         WRITE_ONCE(rsclp->tails[i], rclp->tail);
     458             :                 else
     459             :                         break;
     460       43595 :         rclp->head = NULL;
     461       43595 :         rclp->tail = &rclp->head;
     462             : }
     463             : 
     464             : /*
     465             :  * Move callbacks from the specified rcu_cblist to the end of the
     466             :  * new-callbacks segment of the specified rcu_segcblist.
     467             :  */
     468           0 : void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
     469             :                                    struct rcu_cblist *rclp)
     470             : {
     471           0 :         if (!rclp->head)
     472             :                 return; /* Nothing to do. */
     473             : 
     474           0 :         rcu_segcblist_add_seglen(rsclp, RCU_NEXT_TAIL, rclp->len);
     475           0 :         WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rclp->head);
     476           0 :         WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], rclp->tail);
     477             : }
     478             : 
     479             : /*
     480             :  * Advance the callbacks in the specified rcu_segcblist structure based
     481             :  * on the current value passed in for the grace-period counter.
     482             :  */
     483        7466 : void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
     484             : {
     485        7466 :         int i, j;
     486             : 
     487        7466 :         WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
     488        7466 :         if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
     489             :                 return;
     490             : 
     491             :         /*
     492             :          * Find all callbacks whose ->gp_seq numbers indicate that they
     493             :          * are ready to invoke, and put them into the RCU_DONE_TAIL segment.
     494             :          */
     495       15836 :         for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
     496       13987 :                 if (ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
     497             :                         break;
     498        8370 :                 WRITE_ONCE(rsclp->tails[RCU_DONE_TAIL], rsclp->tails[i]);
     499       14539 :                 rcu_segcblist_move_seglen(rsclp, i, RCU_DONE_TAIL);
     500             :         }
     501             : 
     502             :         /* If no callbacks moved, nothing more need be done. */
     503        7466 :         if (i == RCU_WAIT_TAIL)
     504             :                 return;
     505             : 
     506             :         /* Clean up tail pointers that might have been misordered above. */
     507       14891 :         for (j = RCU_WAIT_TAIL; j < i; j++)
     508        8370 :                 WRITE_ONCE(rsclp->tails[j], rsclp->tails[RCU_DONE_TAIL]);
     509             : 
     510             :         /*
     511             :          * Callbacks moved, so clean up the misordered ->tails[] pointers
     512             :          * that now point into the middle of the list of ready-to-invoke
     513             :          * callbacks.  The overall effect is to copy down the later pointers
     514             :          * into the gap that was created by the now-ready segments.
     515             :          */
     516       11193 :         for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
     517        4672 :                 if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])
     518             :                         break;  /* No more callbacks. */
     519        4672 :                 WRITE_ONCE(rsclp->tails[j], rsclp->tails[i]);
     520        4672 :                 rcu_segcblist_move_seglen(rsclp, i, j);
     521        4672 :                 rsclp->gp_seq[j] = rsclp->gp_seq[i];
     522             :         }
     523             : }
     524             : 
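The ULONG_CMP_LT() test above is the wraparound-safe sequence comparison
from include/linux/rcupdate.h: the unsigned difference is treated as a
signed distance, so the ordering remains correct after ->gp_seq wraps past
ULONG_MAX.  A minimal standalone illustration:

        #include <assert.h>
        #include <limits.h>

        /* As in include/linux/rcupdate.h. */
        #define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))

        int main(void)
        {
                assert(ULONG_CMP_LT(1UL, 2UL));         /* ordinary ordering */
                assert(!ULONG_CMP_LT(2UL, 1UL));
                assert(ULONG_CMP_LT(ULONG_MAX, 1UL));   /* correct across wrap */
                return 0;
        }
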
     525             : /*
     526             :  * "Accelerate" callbacks based on more-accurate grace-period information.
     527             :  * The reason for this is that RCU does not synchronize the beginnings and
     528             :  * ends of grace periods, and that callbacks are posted locally.  This in
     529             :  * turn means that the callbacks must be labelled conservatively early
     530             :  * on, as getting exact information would degrade both performance and
     531             :  * scalability.  When more accurate grace-period information becomes
     532             :  * available, previously posted callbacks can be "accelerated", marking
     533             :  * them to complete at the end of the earlier grace period.
     534             :  *
     535             :  * This function operates on an rcu_segcblist structure, and also the
     536             :  * grace-period sequence number seq at which new callbacks would become
     537             :  * ready to invoke.  Returns true if there are callbacks that won't be
     538             :  * ready to invoke until seq, false otherwise.
     539             :  */
     540       16754 : bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq)
     541             : {
     542       16754 :         int i, j;
     543             : 
     544       16754 :         WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
     545       16754 :         if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
     546             :                 return false;
     547             : 
     548             :         /*
     549             :          * Find the segment preceding the oldest segment of callbacks
     550             :          * whose ->gp_seq[] completion is at or after that passed in via
     551             :          * "seq", skipping any empty segments.  This oldest segment, along
     552             :          * with any later segments, can be merged in with any newly arrived
     553             :          * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq"
     554             :          * as their ->gp_seq[] grace-period completion sequence number.
     555             :          */
     556       37970 :         for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--)
     557       33390 :                 if (rsclp->tails[i] != rsclp->tails[i - 1] &&
     558       18402 :                     ULONG_CMP_LT(rsclp->gp_seq[i], seq))
     559             :                         break;
     560             : 
     561             :         /*
     562             :          * If all the segments contain callbacks that correspond to
     563             :          * earlier grace-period sequence numbers than "seq", leave.
     564             :          * Assuming that the rcu_segcblist structure has enough
     565             :          * segments in its arrays, this can only happen if some of
     566             :          * the non-done segments contain callbacks that really are
     567             :          * ready to invoke.  This situation will get straightened
     568             :          * out by the next call to rcu_segcblist_advance().
     569             :          *
     570             :          * Also advance to the oldest segment of callbacks whose
     571             :          * ->gp_seq[] completion is at or after that passed in via "seq",
     572             :          * skipping any empty segments.
     573             :          *
     574             :          * Note that segment "i" (and any lower-numbered segments
     575             :          * containing older callbacks) will be unaffected, and their
     576             :          * grace-period numbers remain unchanged.  For example, if i ==
     577             :          * WAIT_TAIL, then neither WAIT_TAIL nor DONE_TAIL will be touched.
     578             :          * Instead, the CBs in NEXT_TAIL will be merged with those in
     579             :          * NEXT_READY_TAIL and the grace-period number of NEXT_READY_TAIL
     580             :          * would be updated.  NEXT_TAIL would then be empty.
     581             :          */
     582       16695 :         if (rcu_segcblist_restempty(rsclp, i) || ++i >= RCU_NEXT_TAIL)
     583             :                 return false;
     584             : 
     585             :         /* Accounting: everything below i is about to get merged into i. */
     586       29500 :         for (j = i + 1; j <= RCU_NEXT_TAIL; j++)
     587       17040 :                 rcu_segcblist_move_seglen(rsclp, j, i);
     588             : 
     589             :         /*
     590             :          * Merge all later callbacks, including newly arrived callbacks,
     591             :          * into the segment located by the for-loop above.  Assign "seq"
     592             :          * as the ->gp_seq[] value in order to correctly handle the case
     593             :          * where there were no pending callbacks in the rcu_segcblist
     594             :          * structure other than in the RCU_NEXT_TAIL segment.
     595             :          */
     596       29500 :         for (; i < RCU_NEXT_TAIL; i++) {
     597       17040 :                 WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_NEXT_TAIL]);
     598       17040 :                 rsclp->gp_seq[i] = seq;
     599             :         }
     600             :         return true;
     601             : }
     602             : 
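In the RCU core, advancing and accelerating typically run back to back:
first reclassify whatever has become ready, then tag the remaining
callbacks with the earliest future grace period a new callback could wait
on.  A rough sketch of that calling pattern, modeled on rcu_advance_cbs()
and rcu_accelerate_cbs() in kernel/rcu/tree.c with locking elided:

        unsigned long gp_seq_req;
        bool needwake = false;

        /* Sketch: the real callers hold the leaf rcu_node structure's lock. */
        rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
        gp_seq_req = rcu_seq_snap(&rnp->gp_seq);
        if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
                /* Newly accelerated CBs need gp_seq_req: request that GP. */
                needwake = rcu_start_this_gp(rnp, rdp, gp_seq_req);
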
     603             : /*
     604             :  * Merge the source rcu_segcblist structure into the destination
     605             :  * rcu_segcblist structure, then initialize the source.  Any pending
     606             :  * callbacks from the source get to start over.  It is best to
     607             :  * advance and accelerate both the destination and the source
     608             :  * before merging.
     609             :  */
     610           0 : void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
     611             :                          struct rcu_segcblist *src_rsclp)
     612             : {
     613           0 :         struct rcu_cblist donecbs;
     614           0 :         struct rcu_cblist pendcbs;
     615             : 
     616           0 :         lockdep_assert_cpus_held();
     617             : 
     618           0 :         rcu_cblist_init(&donecbs);
     619           0 :         rcu_cblist_init(&pendcbs);
     620             : 
     621           0 :         rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs);
     622           0 :         rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs);
     623             : 
     624             :         /*
      625             :          * No need for an smp_mb() before setting the length to 0,
      626             :          * because the CPU-hotplug lock excludes rcu_barrier().
     627             :          */
     628           0 :         rcu_segcblist_set_len(src_rsclp, 0);
     629             : 
     630           0 :         rcu_segcblist_insert_count(dst_rsclp, &donecbs);
     631           0 :         rcu_segcblist_insert_count(dst_rsclp, &pendcbs);
     632           0 :         rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs);
     633           0 :         rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs);
     634             : 
     635           0 :         rcu_segcblist_init(src_rsclp);
     636           0 : }

Generated by: LCOV version 1.14