LCOV code coverage report
File: arch/x86/kernel/cpu/aperfmperf.c
Test: landlock.info
Lines: 0 of 57 covered (0.0 %); Functions: 0 of 5 covered (0.0 %)
Date: 2021-04-22 12:43:58

// SPDX-License-Identifier: GPL-2.0-only
/*
 * x86 APERF/MPERF KHz calculation for
 * /sys/.../cpufreq/scaling_cur_freq
 *
 * Copyright (C) 2017 Intel Corp.
 * Author: Len Brown <len.brown@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/sched/isolation.h>
#include <linux/rcupdate.h>

#include "cpu.h"

struct aperfmperf_sample {
        unsigned int    khz;
        atomic_t        scfpending;
        ktime_t time;
        u64     aperf;
        u64     mperf;
};

static DEFINE_PER_CPU(struct aperfmperf_sample, samples);

#define APERFMPERF_CACHE_THRESHOLD_MS   10
#define APERFMPERF_REFRESH_DELAY_MS     10
#define APERFMPERF_STALE_THRESHOLD_MS   1000
/*
 * aperfmperf_snapshot_khz()
 * On the current CPU, snapshot APERF, MPERF, and the current ktime,
 * unless we already did it within 10ms.
 * Calculate kHz and save the snapshot.
 */
static void aperfmperf_snapshot_khz(void *dummy)
{
        u64 aperf, aperf_delta;
        u64 mperf, mperf_delta;
        struct aperfmperf_sample *s = this_cpu_ptr(&samples);
        unsigned long flags;

        local_irq_save(flags);
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
        local_irq_restore(flags);

        aperf_delta = aperf - s->aperf;
        mperf_delta = mperf - s->mperf;

        /*
         * There is no architectural guarantee that MPERF
         * increments faster than we can read it.
         */
        if (mperf_delta == 0)
                return;

        s->time = ktime_get();
        s->aperf = aperf;
        s->mperf = mperf;
        s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
        atomic_set_release(&s->scfpending, 0);
}
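
The kHz figure is the base (TSC) frequency scaled by how far APERF (cycles at the delivered frequency) advanced relative to MPERF (cycles at a fixed base frequency) over the sampling window. The same arithmetic can be reproduced from user space through the msr driver; the sketch below is illustrative only and assumes root, a loaded msr module (modprobe msr), and a hand-set BASE_KHZ standing in for the kernel's cpu_khz.

/*
 * Illustrative user-space equivalent; build with: gcc -O2 aperf_khz.c
 * BASE_KHZ is an assumption: set it to the measured CPU's base frequency.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define IA32_MPERF      0xe7            /* counts at the base frequency */
#define IA32_APERF      0xe8            /* counts at the delivered frequency */
#define BASE_KHZ        2400000ULL      /* assumed; the kernel uses cpu_khz */

static uint64_t rdmsr_fd(int fd, uint32_t reg)
{
        uint64_t val = 0;

        /* the msr driver maps the file offset to the MSR address */
        pread(fd, &val, sizeof(val), reg);
        return val;
}

int main(void)
{
        int fd = open("/dev/cpu/0/msr", O_RDONLY);
        uint64_t a0, m0, a1, m1;

        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }
        a0 = rdmsr_fd(fd, IA32_APERF);
        m0 = rdmsr_fd(fd, IA32_MPERF);
        usleep(10 * 1000);              /* mirror the kernel's 10ms window */
        a1 = rdmsr_fd(fd, IA32_APERF);
        m1 = rdmsr_fd(fd, IA32_MPERF);
        if (m1 == m0)                   /* same guard as above: no delta */
                return 1;
        printf("effective kHz: %llu\n",
               (unsigned long long)(BASE_KHZ * (a1 - a0) / (m1 - m0)));
        return 0;
}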
      69             : 
static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
{
        s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));
        struct aperfmperf_sample *s = per_cpu_ptr(&samples, cpu);

        /* Don't bother re-computing within the cache threshold time. */
        if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
                return true;

        if (!atomic_xchg(&s->scfpending, 1) || wait)
                smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);

        /* Return false if the previous iteration was too long ago. */
        return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
}
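
The scfpending flag throttles the cross-calls: atomic_xchg() returns the previous value, so only the caller that flips it from 0 to 1 posts the asynchronous IPI, while later callers find it already set and skip the call (unless they need a synchronous refresh). The handler's atomic_set_release() reopens the flag once the sample has landed. A minimal C11 sketch of that claim-the-flag idiom, with illustrative names rather than kernel API:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int scfpending;           /* per-CPU in the kernel code */

/* Stands in for aperfmperf_snapshot_khz() running on the target CPU. */
static void snapshot(void)
{
        /* ... take the measurement ... */
        /* Release store reopens the flag, like atomic_set_release(). */
        atomic_store_explicit(&scfpending, 0, memory_order_release);
}

static void request_snapshot(bool wait)
{
        /*
         * atomic_exchange() returns the old value: exactly one caller
         * sees 0 and posts the work; the rest piggyback on the request
         * already in flight, unless they asked to wait.
         */
        if (!atomic_exchange(&scfpending, 1) || wait)
                snapshot();     /* the kernel posts an IPI here instead */
}

int main(void)
{
        request_snapshot(false);
        request_snapshot(true);         /* a waiter always refreshes */
        return 0;
}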
      85             : 
unsigned int aperfmperf_get_khz(int cpu)
{
        if (!cpu_khz)
                return 0;

        if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
                return 0;

        if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
                return 0;

        if (rcu_is_idle_cpu(cpu))
                return 0; /* Idle CPUs are completely uninteresting. */

        aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
        return per_cpu(samples.khz, cpu);
}
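
aperfmperf_get_khz() is what ultimately backs the per-CPU scaling_cur_freq file named in the header comment. A small user-space sketch reading the published number for CPU 0, assuming the standard sysfs layout:

#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq";
        unsigned int khz;
        FILE *f = fopen(path, "r");

        if (!f || fscanf(f, "%u", &khz) != 1) {
                perror(path);
                return 1;
        }
        printf("cpu0: %u kHz\n", khz);
        fclose(f);
        return 0;
}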
     103             : 
void arch_freq_prepare_all(void)
{
        ktime_t now = ktime_get();
        bool wait = false;
        int cpu;

        if (!cpu_khz)
                return;

        if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
                return;

        for_each_online_cpu(cpu) {
                if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
                        continue;
                if (rcu_is_idle_cpu(cpu))
                        continue; /* Idle CPUs are completely uninteresting. */
                if (!aperfmperf_snapshot_cpu(cpu, now, false))
                        wait = true;
        }

        if (wait)
                msleep(APERFMPERF_REFRESH_DELAY_MS);
}
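
arch_freq_prepare_all() amortizes the refresh cost: it posts non-blocking snapshot requests to every eligible CPU first, then sleeps exactly once for APERFMPERF_REFRESH_DELAY_MS, rather than paying a delay per CPU. A portable pthreads sketch of the same kick-everything-then-wait-once shape (structure only, no kernel API; build with gcc -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

#define NCPUS 4

/* Stands in for the asynchronous per-CPU snapshot request. */
static void *refresh(void *arg)
{
        return NULL;
}

int main(void)
{
        pthread_t tid[NCPUS];
        bool wait = false;
        int i;

        for (i = 0; i < NCPUS; i++) {   /* kick all refreshes off first */
                pthread_create(&tid[i], NULL, refresh, NULL);
                wait = true;
        }
        if (wait)                       /* then sleep exactly once */
                usleep(10 * 1000);
        for (i = 0; i < NCPUS; i++)     /* cleanup; the kernel has no join */
                pthread_join(tid[i], NULL);
        return 0;
}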
     128             : 
unsigned int arch_freq_get_on_cpu(int cpu)
{
        struct aperfmperf_sample *s = per_cpu_ptr(&samples, cpu);

        if (!cpu_khz)
                return 0;

        if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
                return 0;

        if (!housekeeping_cpu(cpu, HK_FLAG_MISC))
                return 0;

        if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
                return per_cpu(samples.khz, cpu);

        msleep(APERFMPERF_REFRESH_DELAY_MS);
        atomic_set(&s->scfpending, 1);
        smp_mb(); /* ->scfpending before smp_call_function_single(). */
        smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);

        return per_cpu(samples.khz, cpu);
}
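
The slow path at the end exists because kHz is a rate, not a level: when the cached sample is older than APERFMPERF_STALE_THRESHOLD_MS, the synchronous snapshot above only re-baselines the counters, so the code sleeps one refresh window and forces a second snapshot to obtain a delta over a short, current interval. A generic two-sample sketch of that measurement shape, using a monotonic clock as a stand-in for the APERF/MPERF pair:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* One "counter" readout; the kernel reads APERF and MPERF instead. */
static uint64_t counter_read(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
        uint64_t base = counter_read();         /* re-baseline first */
        struct timespec d = { 0, 10 * 1000 * 1000 };

        nanosleep(&d, NULL);                    /* one 10ms refresh window */
        printf("delta over window: %llu\n",     /* now the delta is current */
               (unsigned long long)(counter_read() - base));
        return 0;
}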

Generated by: LCOV version 1.14