LCOV - code coverage report
Current view: top level - lib - flex_proportions.c
Test:         landlock.info
Date:         2021-04-22 12:43:58

              Hit    Total    Coverage
Lines:         63      120      52.5 %
Functions:      7       14      50.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  *  Floating proportions with flexible aging period
       4             :  *
       5             :  *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
       6             :  *
       7             :  * The goal of this code is: Given different types of events, measure the
       8             :  * proportion of each type of event over time. The proportions are measured
       9             :  * with an exponentially decaying history to give smooth transitions. A
      10             :  * formula expressing the proportion of events of type 'j' is:
      11             :  *
      12             :  *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
      13             :  *
      14             :  * where x_{i,j} is the number of events of type 'j' in the i-th last time
      15             :  * period and x_i is the total number of events in the i-th last time period.
      16             :  *
      17             :  * Note that p_{j}'s are normalised, i.e.
      18             :  *
      19             :  *   \Sum_{j} p_{j} = 1.
      20             :  *
      21             :  * This formula can be computed straightforwardly by maintaining a
      22             :  * denominator (let's call it 'd') and, for each event type, its numerator
      23             :  * (let's call it 'n_j'). When an event of type 'j' happens, we simply do:
      24             :  *   n_j++; d++;
      25             :  *
      26             :  * When a new period is declared, we could do:
      27             :  *   d /= 2
      28             :  *   for each j
      29             :  *     n_j /= 2
      30             :  *
      31             :  * To avoid iterating over all event types, we instead shift the numerator
      32             :  * of event type 'j' lazily, when someone asks for the proportion of type 'j'
      33             :  * or when an event of type 'j' occurs. This can be trivially implemented by
      34             :  * remembering the last period in which the numerator of type 'j' changed.
      35             :  */
      36             : #include <linux/flex_proportions.h>
      37             : 
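
To make the scheme above concrete, a minimal userspace sketch (not part of the
kernel source; all names are hypothetical): one event type 'j' against a shared
denominator, using the same lazy shift the header comment describes.

	/* Illustrative only -- mirrors the scheme in the header comment. */
	#include <stdio.h>

	static unsigned long global_period;	/* periods declared so far */
	static unsigned long d = 1;		/* denominator, seeded with 1 */
	static unsigned long n_j;		/* numerator of event type 'j' */
	static unsigned long j_period;		/* period n_j was last aged to */

	static void event_j(void)
	{
		/* Lazy aging: catch n_j up with the periods it missed. */
		unsigned long missed = global_period - j_period;

		n_j = missed < 64 ? n_j >> missed : 0;	/* 64 ~ BITS_PER_LONG */
		j_period = global_period;
		n_j++;
		d++;
	}

	static void new_period(void)
	{
		if (d <= 1)
			return;		/* aging has zeroed everything out */
		d >>= 1;		/* eager aging of the shared denominator */
		global_period++;
	}

	int main(void)
	{
		for (int i = 0; i < 8; i++)
			event_j();
		new_period();		/* older events now weigh half as much */
		event_j();
		printf("p_j ~= %lu/%lu\n", n_j, d);	/* 5/5: only 'j' fired */
		return 0;
	}
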
      38           1 : int fprop_global_init(struct fprop_global *p, gfp_t gfp)
      39             : {
      40           1 :         int err;
      41             : 
      42           1 :         p->period = 0;
      43             :         /* Use 1 to avoid dealing with periods with 0 events... */
      44           1 :         err = percpu_counter_init(&p->events, 1, gfp);
      45           1 :         if (err)
      46             :                 return err;
      47           1 :         seqcount_init(&p->sequence);
      48           1 :         return 0;
      49             : }
      50             : 
      51           0 : void fprop_global_destroy(struct fprop_global *p)
      52             : {
      53           0 :         percpu_counter_destroy(&p->events);
      54           0 : }
      55             : 
      56             : /*
      57             :  * Declare @periods new periods. It is up to the caller to make sure period
      58             :  * transitions cannot happen in parallel.
      59             :  *
      60             :  * The function returns true if the proportions are still defined and false
      61             :  * if aging zeroed out all events. This can be used to detect whether declaring
      62             :  * further periods has any effect.
      63             :  */
      64          24 : bool fprop_new_period(struct fprop_global *p, int periods)
      65             : {
      66          24 :         s64 events;
      67          24 :         unsigned long flags;
      68             : 
      69          48 :         local_irq_save(flags);
      70          24 :         events = percpu_counter_sum(&p->events);
      71             :         /*
      72             :          * Don't do anything if there are no events.
      73             :          */
      74          24 :         if (events <= 1) {
      75           0 :                 local_irq_restore(flags);
      76           0 :                 return false;
      77             :         }
      78          24 :         write_seqcount_begin(&p->sequence);
      79          24 :         if (periods < 64)
      80          24 :                 events -= events >> periods;
      81             :         /* Use addition to avoid losing events happening between sum and set */
      82          24 :         percpu_counter_add(&p->events, -events);
      83          24 :         p->period += periods;
      84          24 :         write_seqcount_end(&p->sequence);
      85          24 :         local_irq_restore(flags);
      86             : 
      87             :         return true;
      88             : }
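
A plausible caller shape (hypothetical, not from this file): drive the aging
from a periodic timer and use the return value to stop rearming once all
events have decayed away, which is how the hint above is meant to be consumed.

	/* Hypothetical: age once per tick; let the timer die when all
	 * proportions have decayed to zero. */
	static void age_proportions(struct fprop_global *p)
	{
		if (fprop_new_period(p, 1))
			rearm_aging_timer();	/* hypothetical helper */
	}
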
      89             : 
      90             : /*
      91             :  * ---- SINGLE ----
      92             :  */
      93             : 
      94           0 : int fprop_local_init_single(struct fprop_local_single *pl)
      95             : {
      96           0 :         pl->events = 0;
      97           0 :         pl->period = 0;
      98           0 :         raw_spin_lock_init(&pl->lock);
      99           0 :         return 0;
     100             : }
     101             : 
     102           0 : void fprop_local_destroy_single(struct fprop_local_single *pl)
     103             : {
     104           0 : }
     105             : 
     106           0 : static void fprop_reflect_period_single(struct fprop_global *p,
     107             :                                         struct fprop_local_single *pl)
     108             : {
     109           0 :         unsigned int period = p->period;
     110           0 :         unsigned long flags;
     111             : 
     112             :         /* Fast path - period didn't change */
     113           0 :         if (pl->period == period)
     114             :                 return;
     115           0 :         raw_spin_lock_irqsave(&pl->lock, flags);
     116             :         /* Someone updated pl->period while we were spinning? */
     117           0 :         if (pl->period >= period) {
     118           0 :                 raw_spin_unlock_irqrestore(&pl->lock, flags);
     119           0 :                 return;
     120             :         }
     121             :         /* Aging zeroed our fraction? */
     122           0 :         if (period - pl->period < BITS_PER_LONG)
     123           0 :                 pl->events >>= period - pl->period;
     124             :         else
     125           0 :                 pl->events = 0;
     126           0 :         pl->period = period;
     127           0 :         raw_spin_unlock_irqrestore(&pl->lock, flags);
     128             : }
     129             : 
     130             : /* Event of type pl happened */
     131           0 : void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
     132             : {
     133           0 :         fprop_reflect_period_single(p, pl);
     134           0 :         pl->events++;
     135           0 :         percpu_counter_add(&p->events, 1);
     136           0 : }
     137             : 
     138             : /* Return fraction of events of type pl */
     139           0 : void fprop_fraction_single(struct fprop_global *p,
     140             :                            struct fprop_local_single *pl,
     141             :                            unsigned long *numerator, unsigned long *denominator)
     142             : {
     143           0 :         unsigned int seq;
     144           0 :         s64 num, den;
     145             : 
     146           0 :         do {
     147           0 :                 seq = read_seqcount_begin(&p->sequence);
     148           0 :                 fprop_reflect_period_single(p, pl);
     149           0 :                 num = pl->events;
     150           0 :                 den = percpu_counter_read_positive(&p->events);
     151           0 :         } while (read_seqcount_retry(&p->sequence, seq));
     152             : 
     153             :         /*
     154             :          * Make the fraction <= 1 and the denominator > 0 even in the
     155             :          * presence of percpu counter errors
     156             :          */
     157           0 :         if (den <= num) {
     158           0 :                 if (num)
     159             :                         den = num;
     160             :                 else
     161           0 :                         den = 1;
     162             :         }
     163           0 :         *denominator = den;
     164           0 :         *numerator = num;
     165           0 : }
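
A sketch of one way a caller might consume the pair (hypothetical helper;
'total_budget' is an assumed input, and div64_u64() comes from
<linux/math64.h>): since den > 0 and num <= den are guaranteed above, the
division is safe and the share cannot exceed the budget.

	/* Hypothetical helper: proportional share of an assumed budget. */
	static u64 share_of_budget(struct fprop_global *p,
				   struct fprop_local_single *pl,
				   u64 total_budget)
	{
		unsigned long num, den;

		fprop_fraction_single(p, pl, &num, &den);
		/* den > 0 and num <= den are guaranteed above. */
		return div64_u64(total_budget * num, den);
	}
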
     166             : 
     167             : /*
     168             :  * ---- PERCPU ----
     169             :  */
     170             : #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
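
For intuition about the batch size (ilog2() is the integer floor of log2),
two worked expansions:

	/* nr_cpu_ids ==  8: PROP_BATCH = 8 * (1 + ilog2(8))  = 8 * 4 = 32 */
	/* nr_cpu_ids == 64: PROP_BATCH = 8 * (1 + ilog2(64)) = 8 * 7 = 56 */
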
     171             : 
     172          10 : int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
     173             : {
     174          10 :         int err;
     175             : 
     176          10 :         err = percpu_counter_init(&pl->events, 0, gfp);
     177          10 :         if (err)
     178             :                 return err;
     179          10 :         pl->period = 0;
     180          10 :         raw_spin_lock_init(&pl->lock);
     181          10 :         return 0;
     182             : }
     183             : 
     184           0 : void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
     185             : {
     186           0 :         percpu_counter_destroy(&pl->events);
     187           0 : }
     188             : 
     189        2409 : static void fprop_reflect_period_percpu(struct fprop_global *p,
     190             :                                         struct fprop_local_percpu *pl)
     191             : {
     192        2409 :         unsigned int period = p->period;
     193        2409 :         unsigned long flags;
     194             : 
     195             :         /* Fast path - period didn't change */
     196        2409 :         if (pl->period == period)
     197             :                 return;
     198          15 :         raw_spin_lock_irqsave(&pl->lock, flags);
     199             :         /* Someone updated pl->period while we were spinning? */
     200          15 :         if (pl->period >= period) {
     201           0 :                 raw_spin_unlock_irqrestore(&pl->lock, flags);
     202           0 :                 return;
     203             :         }
     204             :         /* Aging zeroed our fraction? */
     205          15 :         if (period - pl->period < BITS_PER_LONG) {
     206          15 :                 s64 val = percpu_counter_read(&pl->events);
     207             : 
     208          15 :                 if (val < (nr_cpu_ids * PROP_BATCH))
     209           6 :                         val = percpu_counter_sum(&pl->events);
     210             : 
     211          15 :                 percpu_counter_add_batch(&pl->events,
     212          15 :                         -val + (val >> (period-pl->period)), PROP_BATCH);
     213             :         } else
     214           0 :                 percpu_counter_set(&pl->events, 0);
     215          15 :         pl->period = period;
     216          15 :         raw_spin_unlock_irqrestore(&pl->lock, flags);
     217             : }
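
The add of '-val + (val >> shift)' replaces val with val >> shift without
overwriting increments that race with the read; a worked example with
illustrative numbers:

	/*
	 * val == 96, two periods elapsed (shift == 2):
	 *   delta = -96 + (96 >> 2) = -96 + 24 = -72
	 * The counter drops from 96 to 24 (== 96 / 2^2), while any events
	 * added concurrently since 'val' was read are preserved.
	 */
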
     218             : 
     219             : /* Event of type pl happened */
     220        2394 : void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
     221             : {
     222        2394 :         fprop_reflect_period_percpu(p, pl);
     223        2394 :         percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
     224        2394 :         percpu_counter_add(&p->events, 1);
     225        2394 : }
     226             : 
     227          15 : void fprop_fraction_percpu(struct fprop_global *p,
     228             :                            struct fprop_local_percpu *pl,
     229             :                            unsigned long *numerator, unsigned long *denominator)
     230             : {
     231          15 :         unsigned int seq;
     232          15 :         s64 num, den;
     233             : 
     234          15 :         do {
     235          15 :                 seq = read_seqcount_begin(&p->sequence);
     236          15 :                 fprop_reflect_period_percpu(p, pl);
     237          15 :                 num = percpu_counter_read_positive(&pl->events);
     238          15 :                 den = percpu_counter_read_positive(&p->events);
     239          15 :         } while (read_seqcount_retry(&p->sequence, seq));
     240             : 
     241             :         /*
     242             :          * Make the fraction <= 1 and the denominator > 0 even in the
     243             :          * presence of percpu counter errors
     244             :          */
     245          15 :         if (den <= num) {
     246          14 :                 if (num)
     247             :                         den = num;
     248             :                 else
     249           0 :                         den = 1;
     250             :         }
     251          15 :         *denominator = den;
     252          15 :         *numerator = num;
     253          15 : }
     254             : 
     255             : /*
     256             :  * Like __fprop_inc_percpu() except that the event is counted only if the
     257             :  * given type's fraction is smaller than @max_frac/FPROP_FRAC_BASE
     258             :  */
     259        2394 : void __fprop_inc_percpu_max(struct fprop_global *p,
     260             :                             struct fprop_local_percpu *pl, int max_frac)
     261             : {
     262        2394 :         if (unlikely(max_frac < FPROP_FRAC_BASE)) {
     263           0 :                 unsigned long numerator, denominator;
     264             : 
     265           0 :                 fprop_fraction_percpu(p, pl, &numerator, &denominator);
     266           0 :                 if (numerator >
     267           0 :                     (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
     268           0 :                         return;
     269             :         }
     270             : 
     271        2394 :         __fprop_inc_percpu(p, pl);
     272             : }
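
A short usage sketch (hypothetical caller; FPROP_FRAC_BASE is defined as
1UL << FPROP_FRAC_SHIFT in <linux/flex_proportions.h>): limit type 'pl' to a
quarter of all events.

	/* Count the event only while 'pl' holds under 25% of all events. */
	__fprop_inc_percpu_max(p, pl, FPROP_FRAC_BASE / 4);
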

Generated by: LCOV version 1.14