LCOV - code coverage report
Current view: top level - kernel/sched - pelt.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
Coverage:     Lines: 34 / 34 (100.0 %)    Functions: 3 / 3 (100.0 %)

          Line data    Source code
       1             : #ifdef CONFIG_SMP
       2             : #include "sched-pelt.h"
       3             : 
       4             : int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
       5             : int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
       6             : int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
       7             : int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
       8             : int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
       9             : 
      10             : #ifdef CONFIG_SCHED_THERMAL_PRESSURE
      11             : int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
      12             : 
      13             : static inline u64 thermal_load_avg(struct rq *rq)
      14             : {
      15             :         return READ_ONCE(rq->avg_thermal.load_avg);
      16             : }
      17             : #else
      18             : static inline int
      19       37419 : update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
      20             : {
      21       37419 :         return 0;
      22             : }
      23             : 
      24       20103 : static inline u64 thermal_load_avg(struct rq *rq)
      25             : {
      26       20103 :         return 0;
      27             : }
      28             : #endif
      29             : 
      30             : #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
      31             : int update_irq_load_avg(struct rq *rq, u64 running);
      32             : #else
      33             : static inline int
      34             : update_irq_load_avg(struct rq *rq, u64 running)
      35             : {
      36             :         return 0;
      37             : }
      38             : #endif
      39             : 
      40      115260 : static inline u32 get_pelt_divider(struct sched_avg *avg)
      41             : {
      42      115260 :         return LOAD_AVG_MAX - 1024 + avg->period_contrib;
      43             : }
      44             : 
      45             : /*
      46             :  * When a task is dequeued, its estimated utilization should not be updated if
      47             :  * its util_avg has not been updated at least once.
      48             :  * This flag is used to synchronize util_avg updates with util_est updates.
      49             :  * We map this information into the LSB of the utilization saved at
      50             :  * dequeue time (i.e. util_est.dequeued).
      51             :  */
      52             : #define UTIL_AVG_UNCHANGED 0x1
      53             : 
      54       38937 : static inline void cfs_se_util_change(struct sched_avg *avg)
      55             : {
      56       38937 :         unsigned int enqueued;
      57             : 
      58       38937 :         if (!sched_feat(UTIL_EST))
      59             :                 return;
      60             : 
      61             :         /* Avoid store if the flag has been already set */
      62       38937 :         enqueued = avg->util_est.enqueued;
      63       38937 :         if (!(enqueued & UTIL_AVG_UNCHANGED))
      64             :                 return;
      65             : 
      66             :         /* Reset flag to report util_avg has been updated */
      67        8824 :         enqueued &= ~UTIL_AVG_UNCHANGED;
      68       38937 :         WRITE_ONCE(avg->util_est.enqueued, enqueued);
      69             : }
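
A worked illustration of the flag handling above (the value 212 is hypothetical): the utilization saved at dequeue time carries the flag in bit 0, and cfs_se_util_change() clears that bit again once util_avg has really been updated.

        /* Sketch only, not part of pelt.h: the bit arithmetic behind UTIL_AVG_UNCHANGED. */
        unsigned int saved   = 212 | UTIL_AVG_UNCHANGED;     /* stored at dequeue: 213, "not yet updated" */
        unsigned int cleared = saved & ~UTIL_AVG_UNCHANGED;  /* after a util_avg update: back to 212 */
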
      70             : 
      71             : /*
      72             :  * The clock_pelt scales the time to reflect the effective amount of
      73             :  * computation done during the running delta time, but then syncs back to
      74             :  * clock_task when the rq is idle.
      75             :  *
      76             :  *
      77             :  * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
      78             :  * @ max capacity  ------******---------------******---------------
      79             :  * @ half capacity ------************---------************---------
      80             :  * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
      81             :  *
      82             :  */
      83       72930 : static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
      84             : {
      85       72930 :         if (unlikely(is_idle_task(rq->curr))) {
      86             :                 /* The rq is idle, we can sync to clock_task */
      87       24405 :                 rq->clock_pelt  = rq_clock_task(rq);
      88       24447 :                 return;
      89             :         }
      90             : 
      91             :         /*
      92             :          * When a rq runs at a lower compute capacity, it will need
      93             :  * more time to do the same amount of work than it would at max
      94             :          * capacity. In order to be invariant, we scale the delta to
      95             :          * reflect how much work has been really done.
      96             :          * Running longer results in stealing idle time that will
      97             :          * disturb the load signal compared to max capacity. This
      98             :          * stolen idle time will be automatically reflected when the
      99             :          * rq will be idle and the clock will be synced with
     100             :          * rq_clock_task.
     101             :          */
     102             : 
     103             :         /*
     104             :          * Scale the elapsed time to reflect the real amount of
     105             :          * computation
     106             :          */
     107       48525 :         delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
     108       48525 :         delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
     109             : 
     110       48525 :         rq->clock_pelt += delta;
     111             : }
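
To put numbers on the scaling above, assuming cap_scale(d, c) is (d * c) >> SCHED_CAPACITY_SHIFT with SCHED_CAPACITY_SCALE == 1024 (its usual definition elsewhere in the scheduler), a CPU at half compute capacity running at 75% of its maximum frequency turns a 10 ms wall-clock delta into about 3.75 ms of PELT time:

        /* Sketch only, not part of pelt.h: hypothetical capacity and frequency values. */
        s64 delta   = 10 * NSEC_PER_MSEC;     /* 10,000,000 ns of wall-clock time             */
        s64 by_cpu  = (delta  * 512) >> 10;   /*  5,000,000 ns at half compute capacity       */
        s64 by_freq = (by_cpu * 768) >> 10;   /*  3,750,000 ns at 75% frequency -> clock_pelt */
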
     112             : 
     113             : /*
     114             :  * When rq becomes idle, we have to check if it has lost idle time
     115             :  * because it was fully busy. A rq is fully used when the /Sum util_sum
     116             :  * is greater than or equal to:
     117             :  * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
     118             :  * For optimization and rounding purposes, we don't take the position in the
     119             :  * current window (period_contrib) into account and use the higher bound of
     120             :  * util_sum to decide.
     121             :  */
     122        6972 : static inline void update_idle_rq_clock_pelt(struct rq *rq)
     123             : {
     124        6972 :         u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
     125        6972 :         u32 util_sum = rq->cfs.avg.util_sum;
     126        6972 :         util_sum += rq->avg_rt.util_sum;
     127        6972 :         util_sum += rq->avg_dl.util_sum;
     128             : 
     129             :         /*
     130             :          * Reflecting stolen time makes sense only if the idle
     131             :          * phase would be present at max capacity. As soon as the
     132             :          * utilization of a rq has reached the maximum value, it is
     133             :  * considered an always-running rq without idle time to
     134             :  * steal. This potential idle time is considered lost in
     135             :  * this case. We keep track of this lost idle time compared to
     136             :          * rq's clock_task.
     137             :          */
     138        6972 :         if (util_sum >= divider)
     139         152 :                 rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
     140        6972 : }
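
To attach numbers to the threshold above, assuming the usual LOAD_AVG_MAX of 47742 from sched-pelt.h:

        /*
         * Sketch only, not part of pelt.h: with LOAD_AVG_MAX == 47742,
         *   divider = ((47742 - 1024) << 10) - 47742 = 47791490
         * i.e. LOAD_AVG_MAX below the period_contrib == 0 value of the
         * exact bound quoted above, so a rq whose summed util_sum reaches
         * this value is treated as fully busy, with no idle time to steal.
         */
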
     141             : 
     142       99829 : static inline u64 rq_clock_pelt(struct rq *rq)
     143             : {
     144      199902 :         lockdep_assert_held(&rq->lock);
     145      100005 :         assert_clock_updated(rq);
     146             : 
     147      100005 :         return rq->clock_pelt - rq->lost_idle_time;
     148             : }
     149             : 
     150             : #ifdef CONFIG_CFS_BANDWIDTH
     151             : /* rq->clock_task normalized against any time this cfs_rq has spent throttled */
     152             : static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
     153             : {
     154             :         if (unlikely(cfs_rq->throttle_count))
     155             :                 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
     156             : 
     157             :         return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
     158             : }
     159             : #else
     160       87688 : static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
     161             : {
     162       87688 :         return rq_clock_pelt(rq_of(cfs_rq));
     163             : }
     164             : #endif
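
A rough reading of the CONFIG_CFS_BANDWIDTH variant above, using a hypothetical 30 ms of accumulated throttling:

        /*
         * Sketch only, not part of pelt.h: with throttled_clock_task_time
         * at 30 ms, cfs_rq_clock_pelt() reads 30 ms behind rq_clock_pelt();
         * while throttle_count != 0 it returns the frozen value
         * throttled_clock_task - 30 ms, so no PELT time elapses for the
         * cfs_rq during the throttle.
         */
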
     165             : 
     166             : #else
     167             : 
     168             : static inline int
     169             : update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
     170             : {
     171             :         return 0;
     172             : }
     173             : 
     174             : static inline int
     175             : update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
     176             : {
     177             :         return 0;
     178             : }
     179             : 
     180             : static inline int
     181             : update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
     182             : {
     183             :         return 0;
     184             : }
     185             : 
     186             : static inline int
     187             : update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
     188             : {
     189             :         return 0;
     190             : }
     191             : 
     192             : static inline u64 thermal_load_avg(struct rq *rq)
     193             : {
     194             :         return 0;
     195             : }
     196             : 
     197             : static inline int
     198             : update_irq_load_avg(struct rq *rq, u64 running)
     199             : {
     200             :         return 0;
     201             : }
     202             : 
     203             : static inline u64 rq_clock_pelt(struct rq *rq)
     204             : {
     205             :         return rq_clock_task(rq);
     206             : }
     207             : 
     208             : static inline void
     209             : update_rq_clock_pelt(struct rq *rq, s64 delta) { }
     210             : 
     211             : static inline void
     212             : update_idle_rq_clock_pelt(struct rq *rq) { }
     213             : 
     214             : #endif
     215             : 
     216             : 

Generated by: LCOV version 1.14