/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * Protects against tearing of 64-bit values on 32-bit architectures. This is
 * typically used for statistics read/update in different subsystems.
 *
 * Key points:
 *
 * -  Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
 * -  The whole thing is a no-op on 64-bit architectures.
 *
 * Usage constraints:
 *
 * 1) The write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *
 * 2) The write side must disable preemption, or a seqcount reader can preempt
 *    the writer and also spin forever.
 *
 * 3) The write side must use the _irqsave() variant if other writers, or a
 *    reader, can be invoked from an IRQ context.
 *
 * 4) If a reader fetches several counters, there is no guarantee that the
 *    values are consistent w.r.t. each other (remember key point #2 above:
 *    seqcounts are not used on 64-bit architectures).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads.
 *
 * 6) Readers must use the u64_stats_fetch_{begin,retry}_irq() pair if the
 *    stats might be updated from a hardirq or softirq context (remember key
 *    point #1 above: seqcounts are not used for UP kernels). 32-bit UP stat
 *    readers could read corrupted 64-bit values otherwise.
 *
 * Usage:
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already held, or
 * per-CPU data is used in a non-preemptible context):
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   u64_stats_add(&stats->bytes64, len);  // non-atomic operation
 *   u64_stats_inc(&stats->packets64);     // non-atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no consistency guarantee across several of
 * them):
 *
 * u64 tbytes, tpackets;
 * unsigned int start;
 *
 * do {
 *         start = u64_stats_fetch_begin(&stats->syncp);
 *         tbytes = u64_stats_read(&stats->bytes64);     // non-atomic operation
 *         tpackets = u64_stats_read(&stats->packets64); // non-atomic operation
 * } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 *
 * See drivers/net/loopback.c for an example of use with per-CPU containers,
 * in a BH-disabled context.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

#if BITS_PER_LONG == 64
#include <asm/local64.h>

/*
 * On 64-bit, a u64 can be read and written without tearing, so the counter
 * is simply a local64_t and the seqcount machinery compiles away.
 */
typedef struct {
	local64_t	v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

#else

/*
 * On 32-bit, plain u64 accesses can tear; callers rely on the seqcount in
 * struct u64_stats_sync (SMP) or on preemption/IRQ disabling (UP).
 */
typedef struct {
	u64		v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	p->v += val;
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	p->v++;
}
#endif
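
/*
 * Illustrative sketch only (not part of the upstream API): a hypothetical
 * counter group pairing u64_stats_t values with the u64_stats_sync that
 * guards them. The struct and field names below are invented for the
 * examples that follow.
 */
struct ex_stats {
	u64_stats_t		bytes64;	/* 64-bit safe byte counter */
	u64_stats_t		packets64;	/* 64-bit safe packet counter */
	struct u64_stats_sync	syncp;		/* protects the counters above */
};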

#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
#else
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
}
#endif
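
/*
 * Sketch, reusing the hypothetical struct ex_stats above: initialize the
 * embedded seqcount once before first use (a no-op on 64-bit and on 32-bit
 * UP), typically from the allocation path of the owning object.
 */
static inline void ex_stats_setup(struct ex_stats *stats)
{
	u64_stats_init(&stats->syncp);
}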

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}
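
/*
 * Writer-side sketch following the template from the header comment, using
 * the hypothetical struct ex_stats. The caller must already have exclusive
 * access (a lock held, or per-CPU data in a non-preemptible context).
 */
static inline void ex_stats_account(struct ex_stats *stats, unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	u64_stats_add(&stats->bytes64, len);	/* non-atomic operation */
	u64_stats_inc(&stats->packets64);	/* non-atomic operation */
	u64_stats_update_end(&stats->syncp);
}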

static inline unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
	unsigned long flags = 0;

#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	local_irq_save(flags);
	write_seqcount_begin(&syncp->seq);
#endif
	return flags;
}

static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
				unsigned long flags)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
	local_irq_restore(flags);
#endif
}
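
/*
 * Sketch of the _irqsave() variant (usage constraint #3 above): if another
 * writer, or a reader, can run from IRQ context, the write section must be
 * entered with IRQs disabled. Hypothetical struct ex_stats again.
 */
static inline void ex_stats_account_irqsafe(struct ex_stats *stats,
					    unsigned int len)
{
	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_add(&stats->bytes64, len);
	u64_stats_inc(&stats->packets64);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}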

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
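
/*
 * Reader-side sketch: retry the snapshot until no writer interleaved. Each
 * value is tear-free on its own, but bytes64 and packets64 are not
 * guaranteed consistent with each other (usage constraint #4 above).
 * Hypothetical struct ex_stats again.
 */
static inline void ex_stats_snapshot(const struct ex_stats *stats,
				     u64 *tbytes, u64 *tpackets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*tbytes = u64_stats_read(&stats->bytes64);
		*tpackets = u64_stats_read(&stats->packets64);
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}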

/*
 * If IRQ handlers can update the u64 counters, readers can use the following
 * helpers:
 * - 32-bit SMP: seqcount protection, IRQ safe.
 * - 32-bit UP: must disable IRQs.
 * - 64-bit: u64 values are read atomically, IRQ safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
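
/*
 * Sketch for readers when the stats may be updated from hardirq or softirq
 * context (usage constraint #6 above): the _irq variants disable IRQs on
 * 32-bit UP so the reader cannot observe a torn update. Hypothetical
 * struct ex_stats again.
 */
static inline u64 ex_stats_read_bytes_irqsafe(const struct ex_stats *stats)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		bytes = u64_stats_read(&stats->bytes64);
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

	return bytes;
}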

#endif /* _LINUX_U64_STATS_SYNC_H */
