LCOV - code coverage report
Current view: top level - arch/x86/kernel - smpboot.c (source / functions)
Test: landlock.info          Lines:     375 of 813 hit (46.1 %)
Date: 2021-04-22 12:43:58    Functions:  35 of  68 hit (51.5 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             :  /*
       3             :  *      x86 SMP booting functions
       4             :  *
       5             :  *      (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
       6             :  *      (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
       7             :  *      Copyright 2001 Andi Kleen, SuSE Labs.
       8             :  *
       9             :  *      Much of the core SMP work is based on previous work by Thomas Radke, to
      10             :  *      whom a great many thanks are extended.
      11             :  *
      12             :  *      Thanks to Intel for making available several different Pentium,
      13             :  *      Pentium Pro and Pentium-II/Xeon MP machines.
      14             :  *      Original development of Linux SMP code supported by Caldera.
      15             :  *
      16             :  *      Fixes
      17             :  *              Felix Koop      :       NR_CPUS used properly
      18             :  *              Jose Renau      :       Handle single CPU case.
      19             :  *              Alan Cox        :       By repeated request 8) - Total BogoMIPS report.
      20             :  *              Greg Wright     :       Fix for kernel stacks panic.
      21             :  *              Erich Boleyn    :       MP v1.4 and additional changes.
      22             :  *      Matthias Sattler        :       Changes for 2.1 kernel map.
      23             :  *      Michel Lespinasse       :       Changes for 2.1 kernel map.
      24             :  *      Michael Chastain        :       Change trampoline.S to gnu as.
      25             :  *              Alan Cox        :       Dumb bug: 'B' step PPro's are fine
      26             :  *              Ingo Molnar     :       Added APIC timers, based on code
      27             :  *                                      from Jose Renau
      28             :  *              Ingo Molnar     :       various cleanups and rewrites
      29             :  *              Tigran Aivazian :       fixed "0.00 in /proc/uptime on SMP" bug.
      30             :  *      Maciej W. Rozycki       :       Bits for genuine 82489DX APICs
      31             :  *      Andi Kleen              :       Changed for SMP boot into long mode.
      32             :  *              Martin J. Bligh :       Added support for multi-quad systems
      33             :  *              Dave Jones      :       Report invalid combinations of Athlon CPUs.
      34             :  *              Rusty Russell   :       Hacked into shape for new "hotplug" boot process.
      35             :  *      Andi Kleen              :       Converted to new state machine.
      36             :  *      Ashok Raj               :       CPU hotplug support
      37             :  *      Glauber Costa           :       i386 and x86_64 integration
      38             :  */
      39             : 
      40             : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      41             : 
      42             : #include <linux/init.h>
      43             : #include <linux/smp.h>
      44             : #include <linux/export.h>
      45             : #include <linux/sched.h>
      46             : #include <linux/sched/topology.h>
      47             : #include <linux/sched/hotplug.h>
      48             : #include <linux/sched/task_stack.h>
      49             : #include <linux/percpu.h>
      50             : #include <linux/memblock.h>
      51             : #include <linux/err.h>
      52             : #include <linux/nmi.h>
      53             : #include <linux/tboot.h>
      54             : #include <linux/gfp.h>
      55             : #include <linux/cpuidle.h>
      56             : #include <linux/numa.h>
      57             : #include <linux/pgtable.h>
      58             : #include <linux/overflow.h>
      59             : #include <linux/syscore_ops.h>
      60             : 
      61             : #include <asm/acpi.h>
      62             : #include <asm/desc.h>
      63             : #include <asm/nmi.h>
      64             : #include <asm/irq.h>
      65             : #include <asm/realmode.h>
      66             : #include <asm/cpu.h>
      67             : #include <asm/numa.h>
      68             : #include <asm/tlbflush.h>
      69             : #include <asm/mtrr.h>
      70             : #include <asm/mwait.h>
      71             : #include <asm/apic.h>
      72             : #include <asm/io_apic.h>
      73             : #include <asm/fpu/internal.h>
      74             : #include <asm/setup.h>
      75             : #include <asm/uv/uv.h>
      76             : #include <linux/mc146818rtc.h>
      77             : #include <asm/i8259.h>
      78             : #include <asm/misc.h>
      79             : #include <asm/qspinlock.h>
      80             : #include <asm/intel-family.h>
      81             : #include <asm/cpu_device_id.h>
      82             : #include <asm/spec-ctrl.h>
      83             : #include <asm/hw_irq.h>
      84             : #include <asm/stackprotector.h>
      85             : 
      86             : #ifdef CONFIG_ACPI_CPPC_LIB
      87             : #include <acpi/cppc_acpi.h>
      88             : #endif
      89             : 
      90             : /* representing HT siblings of each logical CPU */
      91             : DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
      92             : EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
      93             : 
      94             : /* representing HT and core siblings of each logical CPU */
      95             : DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
      96             : EXPORT_PER_CPU_SYMBOL(cpu_core_map);
      97             : 
      98             : /* representing HT, core, and die siblings of each logical CPU */
      99             : DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
     100             : EXPORT_PER_CPU_SYMBOL(cpu_die_map);
     101             : 
     102             : DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
     103             : 
     104             : /* Per CPU bogomips and other parameters */
     105             : DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
     106             : EXPORT_PER_CPU_SYMBOL(cpu_info);
     107             : 
     108             : /* Logical package management. We might want to allocate that dynamically */
     109             : unsigned int __max_logical_packages __read_mostly;
     110             : EXPORT_SYMBOL(__max_logical_packages);
     111             : static unsigned int logical_packages __read_mostly;
     112             : static unsigned int logical_die __read_mostly;
     113             : 
     114             : /* Maximum number of SMT threads on any online core */
     115             : int __read_mostly __max_smt_threads = 1;
     116             : 
     117             : /* Flag to indicate if a complete sched domain rebuild is required */
     118             : bool x86_topology_update;
     119             : 
     120           1 : int arch_update_cpu_topology(void)
     121             : {
     122           1 :         int retval = x86_topology_update;
     123             : 
     124           1 :         x86_topology_update = false;
     125           1 :         return retval;
     126             : }
     127             : 
     128           3 : static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
     129             : {
     130           3 :         unsigned long flags;
     131             : 
     132           3 :         spin_lock_irqsave(&rtc_lock, flags);
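                      :         /*
                      :          * CMOS offset 0xF is the BIOS shutdown status byte: 0xA tells the
                      :          * BIOS to skip POST on the next reset and jump through the warm
                      :          * reset vector at 40:67 instead.
                      :          */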
     133           3 :         CMOS_WRITE(0xa, 0xf);
     134           3 :         spin_unlock_irqrestore(&rtc_lock, flags);
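                      :         /* Point the 40:67 warm reset vector (segment:offset) at the trampoline. */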
     135           3 :         *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
     136           3 :                                                         start_eip >> 4;
     137           3 :         *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
     138           3 :                                                         start_eip & 0xf;
     139           3 : }
     140             : 
     141           3 : static inline void smpboot_restore_warm_reset_vector(void)
     142             : {
     143           3 :         unsigned long flags;
     144             : 
     145             :         /*
     146             :          * Paranoid:  Set warm reset code and vector here back
     147             :          * to default values.
     148             :          */
     149           3 :         spin_lock_irqsave(&rtc_lock, flags);
     150           3 :         CMOS_WRITE(0, 0xf);
     151           3 :         spin_unlock_irqrestore(&rtc_lock, flags);
     152             : 
     153           3 :         *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
     154           3 : }
     155             : 
     156             : static void init_freq_invariance(bool secondary, bool cppc_ready);
     157             : 
     158             : /*
      159             :  * Report back to the Boot Processor during boot time or to the calling processor
     160             :  * during CPU online.
     161             :  */
     162           3 : static void smp_callin(void)
     163             : {
     164           3 :         int cpuid;
     165             : 
     166             :         /*
      167             :          * If woken up by an INIT in an 82489DX configuration,
     168             :          * cpu_callout_mask guarantees we don't get here before
     169             :          * an INIT_deassert IPI reaches our local APIC, so it is
     170             :          * now safe to touch our local APIC.
     171             :          */
     172           3 :         cpuid = smp_processor_id();
     173             : 
     174             :         /*
     175             :          * the boot CPU has finished the init stage and is spinning
     176             :          * on callin_map until we finish. We are free to set up this
     177             :          * CPU, first the APIC. (this is probably redundant on most
     178             :          * boards)
     179             :          */
     180           3 :         apic_ap_setup();
     181             : 
     182             :         /*
     183             :          * Save our processor parameters. Note: this information
     184             :          * is needed for clock calibration.
     185             :          */
     186           3 :         smp_store_cpu_info(cpuid);
     187             : 
     188             :         /*
     189             :          * The topology information must be up to date before
     190             :          * calibrate_delay() and notify_cpu_starting().
     191             :          */
     192           3 :         set_cpu_sibling_map(raw_smp_processor_id());
     193             : 
     194           3 :         init_freq_invariance(true, false);
     195             : 
     196             :         /*
     197             :          * Get our bogomips.
     198             :          * Update loops_per_jiffy in cpu_data. Previous call to
     199             :          * smp_store_cpu_info() stored a value that is close but not as
     200             :          * accurate as the value just calculated.
     201             :          */
     202           3 :         calibrate_delay();
     203           3 :         cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
     204           3 :         pr_debug("Stack at about %p\n", &cpuid);
     205             : 
     206           3 :         wmb();
     207             : 
     208           3 :         notify_cpu_starting(cpuid);
     209             : 
     210             :         /*
     211             :          * Allow the master to continue.
     212             :          */
     213           3 :         cpumask_set_cpu(cpuid, cpu_callin_mask);
     214           3 : }
     215             : 
     216             : static int cpu0_logical_apicid;
     217             : static int enable_start_cpu0;
     218             : /*
     219             :  * Activate a secondary processor.
     220             :  */
     221           3 : static void notrace start_secondary(void *unused)
     222             : {
     223             :         /*
     224             :          * Don't put *anything* except direct CPU state initialization
      225             :          * before cpu_init(); SMP booting is fragile enough that we want to
     226             :          * limit the things done here to the most necessary things.
     227             :          */
     228           3 :         cr4_init();
     229             : 
     230             : #ifdef CONFIG_X86_32
     231             :         /* switch away from the initial page table */
     232             :         load_cr3(swapper_pg_dir);
     233             :         __flush_tlb_all();
     234             : #endif
     235           3 :         cpu_init_exception_handling();
     236           3 :         cpu_init();
     237           3 :         rcu_cpu_starting(raw_smp_processor_id());
     238           3 :         x86_cpuinit.early_percpu_clock_init();
     239           3 :         preempt_disable();
     240           3 :         smp_callin();
     241             : 
     242           3 :         enable_start_cpu0 = 0;
     243             : 
      244             :         /* otherwise gcc will move up smp_processor_id() before cpu_init() */
     245           3 :         barrier();
     246             :         /*
     247             :          * Check TSC synchronization with the boot CPU:
     248             :          */
     249           3 :         check_tsc_sync_target();
     250             : 
     251           3 :         speculative_store_bypass_ht_init();
     252             : 
     253             :         /*
     254             :          * Lock vector_lock, set CPU online and bring the vector
     255             :          * allocator online. Online must be set with vector_lock held
     256             :          * to prevent a concurrent irq setup/teardown from seeing a
     257             :          * half valid vector space.
     258             :          */
     259           3 :         lock_vector_lock();
     260           3 :         set_cpu_online(smp_processor_id(), true);
     261           3 :         lapic_online();
     262           3 :         unlock_vector_lock();
     263           3 :         cpu_set_state_online(smp_processor_id());
     264           3 :         x86_platform.nmi_init();
     265             : 
     266             :         /* enable local interrupts */
     267           3 :         local_irq_enable();
     268             : 
     269           3 :         x86_cpuinit.setup_percpu_clockev();
     270             : 
     271           3 :         wmb();
     272           3 :         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
     273           0 : }
     274             : 
     275             : /**
     276             :  * topology_is_primary_thread - Check whether CPU is the primary SMT thread
     277             :  * @cpu:        CPU to check
     278             :  */
     279           6 : bool topology_is_primary_thread(unsigned int cpu)
     280             : {
     281           6 :         return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
     282             : }
     283             : 
     284             : /**
     285             :  * topology_smt_supported - Check whether SMT is supported by the CPUs
     286             :  */
     287           1 : bool topology_smt_supported(void)
     288             : {
     289           1 :         return smp_num_siblings > 1;
     290             : }
     291             : 
     292             : /**
      293             :  * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
     294             :  *
     295             :  * Returns logical package id or -1 if not found
     296             :  */
     297           4 : int topology_phys_to_logical_pkg(unsigned int phys_pkg)
     298             : {
     299           4 :         int cpu;
     300             : 
     301          20 :         for_each_possible_cpu(cpu) {
     302          16 :                 struct cpuinfo_x86 *c = &cpu_data(cpu);
     303             : 
     304          16 :                 if (c->initialized && c->phys_proc_id == phys_pkg)
     305           0 :                         return c->logical_proc_id;
     306             :         }
     307             :         return -1;
     308             : }
     309             : EXPORT_SYMBOL(topology_phys_to_logical_pkg);
     310             : /**
      311             :  * topology_phys_to_logical_die - Map a physical die id to a logical die id
     312             :  *
     313             :  * Returns logical die id or -1 if not found
     314             :  */
     315           4 : int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
     316             : {
     317           4 :         int cpu;
     318           4 :         int proc_id = cpu_data(cur_cpu).phys_proc_id;
     319             : 
     320          20 :         for_each_possible_cpu(cpu) {
     321          16 :                 struct cpuinfo_x86 *c = &cpu_data(cpu);
     322             : 
     323          16 :                 if (c->initialized && c->cpu_die_id == die_id &&
     324           6 :                     c->phys_proc_id == proc_id)
     325           0 :                         return c->logical_die_id;
     326             :         }
     327             :         return -1;
     328             : }
     329             : EXPORT_SYMBOL(topology_phys_to_logical_die);
     330             : 
     331             : /**
     332             :  * topology_update_package_map - Update the physical to logical package map
     333             :  * @pkg:        The physical package id as retrieved via CPUID
     334             :  * @cpu:        The cpu for which this is updated
     335             :  */
     336           4 : int topology_update_package_map(unsigned int pkg, unsigned int cpu)
     337             : {
     338           4 :         int new;
     339             : 
     340             :         /* Already available somewhere? */
     341           4 :         new = topology_phys_to_logical_pkg(pkg);
     342           4 :         if (new >= 0)
     343           0 :                 goto found;
     344             : 
     345           4 :         new = logical_packages++;
     346           4 :         if (new != pkg) {
     347           0 :                 pr_info("CPU %u Converting physical %u to logical package %u\n",
     348             :                         cpu, pkg, new);
     349             :         }
     350           4 : found:
     351           4 :         cpu_data(cpu).logical_proc_id = new;
     352           4 :         return 0;
     353             : }
     354             : /**
     355             :  * topology_update_die_map - Update the physical to logical die map
     356             :  * @die:        The die id as retrieved via CPUID
     357             :  * @cpu:        The cpu for which this is updated
     358             :  */
     359           4 : int topology_update_die_map(unsigned int die, unsigned int cpu)
     360             : {
     361           4 :         int new;
     362             : 
     363             :         /* Already available somewhere? */
     364           4 :         new = topology_phys_to_logical_die(die, cpu);
     365           4 :         if (new >= 0)
     366           0 :                 goto found;
     367             : 
     368           4 :         new = logical_die++;
     369           4 :         if (new != die) {
     370           3 :                 pr_info("CPU %u Converting physical %u to logical die %u\n",
     371             :                         cpu, die, new);
     372             :         }
     373           1 : found:
     374           4 :         cpu_data(cpu).logical_die_id = new;
     375           4 :         return 0;
     376             : }
     377             : 
     378           1 : void __init smp_store_boot_cpu_info(void)
     379             : {
     380           1 :         int id = 0; /* CPU 0 */
     381           1 :         struct cpuinfo_x86 *c = &cpu_data(id);
     382             : 
     383           1 :         *c = boot_cpu_data;
     384           1 :         c->cpu_index = id;
     385           1 :         topology_update_package_map(c->phys_proc_id, id);
     386           1 :         topology_update_die_map(c->cpu_die_id, id);
     387           1 :         c->initialized = true;
     388           1 : }
     389             : 
     390             : /*
     391             :  * The bootstrap kernel entry code has set these up. Save them for
     392             :  * a given CPU
     393             :  */
     394           3 : void smp_store_cpu_info(int id)
     395             : {
     396           3 :         struct cpuinfo_x86 *c = &cpu_data(id);
     397             : 
     398             :         /* Copy boot_cpu_data only on the first bringup */
     399           3 :         if (!c->initialized)
     400           3 :                 *c = boot_cpu_data;
     401           3 :         c->cpu_index = id;
     402             :         /*
     403             :          * During boot time, CPU0 has this setup already. Save the info when
      404             :          * bringing up an AP or an offlined CPU0.
     405             :          */
     406           3 :         identify_secondary_cpu(c);
     407           3 :         c->initialized = true;
     408           3 : }
     409             : 
     410             : static bool
     411           0 : topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
     412             : {
     413           0 :         int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
     414             : 
     415           0 :         return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
     416             : }
     417             : 
     418             : static bool
     419           0 : topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
     420             : {
     421           0 :         int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
     422             : 
     423           0 :         return !WARN_ONCE(!topology_same_node(c, o),
     424             :                 "sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
     425             :                 "[node: %d != %d]. Ignoring dependency.\n",
     426             :                 cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
     427             : }
     428             : 
     429             : #define link_mask(mfunc, c1, c2)                                        \
     430             : do {                                                                    \
     431             :         cpumask_set_cpu((c1), mfunc(c2));                               \
     432             :         cpumask_set_cpu((c2), mfunc(c1));                               \
     433             : } while (0)
     434             : 
     435           0 : static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
     436             : {
     437           0 :         if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
     438           0 :                 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
     439             : 
     440           0 :                 if (c->phys_proc_id == o->phys_proc_id &&
     441           0 :                     c->cpu_die_id == o->cpu_die_id &&
     442           0 :                     per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
     443           0 :                         if (c->cpu_core_id == o->cpu_core_id)
     444           0 :                                 return topology_sane(c, o, "smt");
     445             : 
     446           0 :                         if ((c->cu_id != 0xff) &&
     447           0 :                             (o->cu_id != 0xff) &&
     448             :                             (c->cu_id == o->cu_id))
     449           0 :                                 return topology_sane(c, o, "smt");
     450             :                 }
     451             : 
     452           0 :         } else if (c->phys_proc_id == o->phys_proc_id &&
     453           0 :                    c->cpu_die_id == o->cpu_die_id &&
     454           0 :                    c->cpu_core_id == o->cpu_core_id) {
     455           0 :                 return topology_sane(c, o, "smt");
     456             :         }
     457             : 
     458             :         return false;
     459             : }
     460             : 
     461             : /*
     462             :  * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
     463             :  *
     464             :  * These are Intel CPUs that enumerate an LLC that is shared by
     465             :  * multiple NUMA nodes. The LLC on these systems is shared for
     466             :  * off-package data access but private to the NUMA node (half
     467             :  * of the package) for on-package access.
     468             :  *
     469             :  * CPUID (the source of the information about the LLC) can only
     470             :  * enumerate the cache as being shared *or* unshared, but not
     471             :  * this particular configuration. The CPU in this case enumerates
     472             :  * the cache to be shared across the entire package (spanning both
     473             :  * NUMA nodes).
     474             :  */
     475             : 
     476             : static const struct x86_cpu_id snc_cpu[] = {
     477             :         X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
     478             :         {}
     479             : };
     480             : 
     481           0 : static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
     482             : {
     483           0 :         int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
     484             : 
     485             :         /* Do not match if we do not have a valid APICID for cpu: */
     486           0 :         if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
     487             :                 return false;
     488             : 
     489             :         /* Do not match if LLC id does not match: */
     490           0 :         if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
     491             :                 return false;
     492             : 
     493             :         /*
     494             :          * Allow the SNC topology without warning. Return of false
     495             :          * means 'c' does not share the LLC of 'o'. This will be
     496             :          * reflected to userspace.
     497             :          */
     498           0 :         if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
     499             :                 return false;
     500             : 
     501           0 :         return topology_sane(c, o, "llc");
     502             : }
     503             : 
     504             : /*
     505             :  * Unlike the other levels, we do not enforce keeping a
     506             :  * multicore group inside a NUMA node.  If this happens, we will
     507             :  * discard the MC level of the topology later.
     508             :  */
     509           0 : static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
     510             : {
     511           0 :         if (c->phys_proc_id == o->phys_proc_id)
     512           0 :                 return true;
     513             :         return false;
     514             : }
     515             : 
     516           0 : static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
     517             : {
     518           0 :         if ((c->phys_proc_id == o->phys_proc_id) &&
     519           0 :                 (c->cpu_die_id == o->cpu_die_id))
     520             :                 return true;
     521             :         return false;
     522             : }
     523             : 
     524             : 
     525             : #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
     526           8 : static inline int x86_sched_itmt_flags(void)
     527             : {
     528           8 :         return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
     529             : }
     530             : 
     531             : #ifdef CONFIG_SCHED_MC
     532           4 : static int x86_core_flags(void)
     533             : {
     534           4 :         return cpu_core_flags() | x86_sched_itmt_flags();
     535             : }
     536             : #endif
     537             : #ifdef CONFIG_SCHED_SMT
     538           4 : static int x86_smt_flags(void)
     539             : {
     540           4 :         return cpu_smt_flags() | x86_sched_itmt_flags();
     541             : }
     542             : #endif
     543             : #endif
     544             : 
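                      : /*
                      :  * Topology levels used when a package contains multiple NUMA nodes: the
                      :  * DIE level is omitted since the NUMA domains cover the package instead.
                      :  */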
     545             : static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
     546             : #ifdef CONFIG_SCHED_SMT
     547             :         { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
     548             : #endif
     549             : #ifdef CONFIG_SCHED_MC
     550             :         { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
     551             : #endif
     552             :         { NULL, },
     553             : };
     554             : 
     555             : static struct sched_domain_topology_level x86_topology[] = {
     556             : #ifdef CONFIG_SCHED_SMT
     557             :         { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
     558             : #endif
     559             : #ifdef CONFIG_SCHED_MC
     560             :         { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
     561             : #endif
     562             :         { cpu_cpu_mask, SD_INIT_NAME(DIE) },
     563             :         { NULL, },
     564             : };
     565             : 
     566             : /*
     567             :  * Set if a package/die has multiple NUMA nodes inside.
     568             :  * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
     569             :  * Sub-NUMA Clustering have this.
     570             :  */
     571             : static bool x86_has_numa_in_package;
     572             : 
     573           4 : void set_cpu_sibling_map(int cpu)
     574             : {
     575           4 :         bool has_smt = smp_num_siblings > 1;
     576           4 :         bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
     577           4 :         struct cpuinfo_x86 *c = &cpu_data(cpu);
     578           4 :         struct cpuinfo_x86 *o;
     579           4 :         int i, threads;
     580             : 
     581           4 :         cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
     582             : 
     583           4 :         if (!has_mp) {
     584           4 :                 cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
     585           4 :                 cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
     586           4 :                 cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
     587           4 :                 cpumask_set_cpu(cpu, topology_die_cpumask(cpu));
     588           4 :                 c->booted_cores = 1;
     589           4 :                 return;
     590             :         }
     591             : 
     592           0 :         for_each_cpu(i, cpu_sibling_setup_mask) {
     593           0 :                 o = &cpu_data(i);
     594             : 
     595           0 :                 if ((i == cpu) || (has_smt && match_smt(c, o)))
     596           0 :                         link_mask(topology_sibling_cpumask, cpu, i);
     597             : 
     598           0 :                 if ((i == cpu) || (has_mp && match_llc(c, o)))
     599           0 :                         link_mask(cpu_llc_shared_mask, cpu, i);
     600             : 
     601             :         }
     602             : 
     603             :         /*
     604             :          * This needs a separate iteration over the cpus because we rely on all
      605             :          * topology_sibling_cpumask links to be set up.
     606             :          */
     607           0 :         for_each_cpu(i, cpu_sibling_setup_mask) {
     608           0 :                 o = &cpu_data(i);
     609             : 
     610           0 :                 if ((i == cpu) || (has_mp && match_pkg(c, o))) {
     611           0 :                         link_mask(topology_core_cpumask, cpu, i);
     612             : 
     613             :                         /*
      614             :                          *  Does this new cpu bring up a new core?
     615             :                          */
     616           0 :                         if (cpumask_weight(
     617           0 :                             topology_sibling_cpumask(cpu)) == 1) {
     618             :                                 /*
     619             :                                  * for each core in package, increment
     620             :                                  * the booted_cores for this new cpu
     621             :                                  */
     622           0 :                                 if (cpumask_first(
     623           0 :                                     topology_sibling_cpumask(i)) == i)
     624           0 :                                         c->booted_cores++;
     625             :                                 /*
     626             :                                  * increment the core count for all
     627             :                                  * the other cpus in this package
     628             :                                  */
     629           0 :                                 if (i != cpu)
     630           0 :                                         cpu_data(i).booted_cores++;
     631           0 :                         } else if (i != cpu && !c->booted_cores)
     632           0 :                                 c->booted_cores = cpu_data(i).booted_cores;
     633             :                 }
     634           0 :                 if (match_pkg(c, o) && !topology_same_node(c, o))
     635           0 :                         x86_has_numa_in_package = true;
     636             : 
     637           0 :                 if ((i == cpu) || (has_mp && match_die(c, o)))
     638           0 :                         link_mask(topology_die_cpumask, cpu, i);
     639             :         }
     640             : 
     641           0 :         threads = cpumask_weight(topology_sibling_cpumask(cpu));
     642           0 :         if (threads > __max_smt_threads)
     643           0 :                 __max_smt_threads = threads;
     644             : }
     645             : 
     646             : /* maps the cpu to the sched domain representing multi-core */
     647          56 : const struct cpumask *cpu_coregroup_mask(int cpu)
     648             : {
     649          56 :         return cpu_llc_shared_mask(cpu);
     650             : }
     651             : 
     652           1 : static void impress_friends(void)
     653             : {
     654           1 :         int cpu;
     655           1 :         unsigned long bogosum = 0;
     656             :         /*
     657             :          * Allow the user to impress friends.
     658             :          */
     659           1 :         pr_debug("Before bogomips\n");
     660           5 :         for_each_possible_cpu(cpu)
     661           4 :                 if (cpumask_test_cpu(cpu, cpu_callout_mask))
     662           4 :                         bogosum += cpu_data(cpu).loops_per_jiffy;
     663           1 :         pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
     664             :                 num_online_cpus(),
     665             :                 bogosum/(500000/HZ),
     666             :                 (bogosum/(5000/HZ))%100);
     667             : 
     668           1 :         pr_debug("Before bogocount - setting activated=1\n");
     669           1 : }
     670             : 
     671           0 : void __inquire_remote_apic(int apicid)
     672             : {
     673           0 :         unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
     674           0 :         const char * const names[] = { "ID", "VERSION", "SPIV" };
     675           0 :         int timeout;
     676           0 :         u32 status;
     677             : 
     678           0 :         pr_info("Inquiring remote APIC 0x%x...\n", apicid);
     679             : 
     680           0 :         for (i = 0; i < ARRAY_SIZE(regs); i++) {
     681           0 :                 pr_info("... APIC 0x%x %s: ", apicid, names[i]);
     682             : 
     683             :                 /*
     684             :                  * Wait for idle.
     685             :                  */
     686           0 :                 status = safe_apic_wait_icr_idle();
     687           0 :                 if (status)
     688           0 :                         pr_cont("a previous APIC delivery may have failed\n");
     689             : 
     690           0 :                 apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
     691             : 
     692           0 :                 timeout = 0;
     693           0 :                 do {
     694           0 :                         udelay(100);
     695           0 :                         status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
     696           0 :                 } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
     697             : 
     698           0 :                 switch (status) {
     699             :                 case APIC_ICR_RR_VALID:
     700           0 :                         status = apic_read(APIC_RRR);
     701           0 :                         pr_cont("%08x\n", status);
     702           0 :                         break;
     703           0 :                 default:
     704           0 :                         pr_cont("failed\n");
     705             :                 }
     706             :         }
     707           0 : }
     708             : 
     709             : /*
     710             :  * The Multiprocessor Specification 1.4 (1997) example code suggests
     711             :  * that there should be a 10ms delay between the BSP asserting INIT
     712             :  * and de-asserting INIT, when starting a remote processor.
     713             :  * But that slows boot and resume on modern processors, which include
     714             :  * many cores and don't require that delay.
     715             :  *
      716             :  * Cmdline "cpu_init_udelay=" is available to override this delay.
     717             :  * Modern processor families are quirked to remove the delay entirely.
     718             :  */
     719             : #define UDELAY_10MS_DEFAULT 10000
     720             : 
     721             : static unsigned int init_udelay = UINT_MAX;
     722             : 
     723           0 : static int __init cpu_init_udelay(char *str)
     724             : {
     725           0 :         get_option(&str, &init_udelay);
     726             : 
     727           0 :         return 0;
     728             : }
     729             : early_param("cpu_init_udelay", cpu_init_udelay);
     730             : 
     731           1 : static void __init smp_quirk_init_udelay(void)
     732             : {
     733             :         /* if cmdline changed it from default, leave it alone */
     734           1 :         if (init_udelay != UINT_MAX)
     735             :                 return;
     736             : 
     737             :         /* if modern processor, use no delay */
     738           1 :         if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
     739           0 :             ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
     740           0 :             ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
     741           1 :                 init_udelay = 0;
     742           1 :                 return;
     743             :         }
     744             :         /* else, use legacy delay */
     745           0 :         init_udelay = UDELAY_10MS_DEFAULT;
     746             : }
     747             : 
     748             : /*
     749             :  * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
     750             :  * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
     751             :  * won't ... remember to clear down the APIC, etc later.
     752             :  */
     753             : int
     754           0 : wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
     755             : {
     756           0 :         u32 dm = apic->dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
     757           0 :         unsigned long send_status, accept_status = 0;
     758           0 :         int maxlvt;
     759             : 
     760             :         /* Target chip */
     761             :         /* Boot on the stack */
     762             :         /* Kick the second */
     763           0 :         apic_icr_write(APIC_DM_NMI | dm, apicid);
     764             : 
     765           0 :         pr_debug("Waiting for send to finish...\n");
     766           0 :         send_status = safe_apic_wait_icr_idle();
     767             : 
     768             :         /*
     769             :          * Give the other CPU some time to accept the IPI.
     770             :          */
     771           0 :         udelay(200);
     772           0 :         if (APIC_INTEGRATED(boot_cpu_apic_version)) {
     773           0 :                 maxlvt = lapic_get_maxlvt();
     774           0 :                 if (maxlvt > 3)                      /* Due to the Pentium erratum 3AP.  */
     775           0 :                         apic_write(APIC_ESR, 0);
     776           0 :                 accept_status = (apic_read(APIC_ESR) & 0xEF);
     777             :         }
     778           0 :         pr_debug("NMI sent\n");
     779             : 
     780           0 :         if (send_status)
     781           0 :                 pr_err("APIC never delivered???\n");
     782           0 :         if (accept_status)
     783           0 :                 pr_err("APIC delivery error (%lx)\n", accept_status);
     784             : 
     785           0 :         return (send_status | accept_status);
     786             : }
     787             : 
     788             : static int
     789           3 : wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
     790             : {
     791           3 :         unsigned long send_status = 0, accept_status = 0;
     792           3 :         int maxlvt, num_starts, j;
     793             : 
     794           3 :         maxlvt = lapic_get_maxlvt();
     795             : 
     796             :         /*
     797             :          * Be paranoid about clearing APIC errors.
     798             :          */
     799           3 :         if (APIC_INTEGRATED(boot_cpu_apic_version)) {
     800           3 :                 if (maxlvt > 3)              /* Due to the Pentium erratum 3AP.  */
     801           3 :                         apic_write(APIC_ESR, 0);
     802           3 :                 apic_read(APIC_ESR);
     803             :         }
     804             : 
     805           3 :         pr_debug("Asserting INIT\n");
     806             : 
     807             :         /*
     808             :          * Turn INIT on target chip
     809             :          */
     810             :         /*
     811             :          * Send IPI
     812             :          */
     813           3 :         apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
     814             :                        phys_apicid);
     815             : 
     816           3 :         pr_debug("Waiting for send to finish...\n");
     817           3 :         send_status = safe_apic_wait_icr_idle();
     818             : 
     819           3 :         udelay(init_udelay);
     820             : 
     821           3 :         pr_debug("Deasserting INIT\n");
     822             : 
     823             :         /* Target chip */
     824             :         /* Send IPI */
     825           3 :         apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
     826             : 
     827           3 :         pr_debug("Waiting for send to finish...\n");
     828           3 :         send_status = safe_apic_wait_icr_idle();
     829             : 
     830           3 :         mb();
     831             : 
     832             :         /*
     833             :          * Should we send STARTUP IPIs ?
     834             :          *
     835             :          * Determine this based on the APIC version.
     836             :          * If we don't have an integrated APIC, don't send the STARTUP IPIs.
     837             :          */
     838           3 :         if (APIC_INTEGRATED(boot_cpu_apic_version))
     839           3 :                 num_starts = 2;
     840             :         else
     841             :                 num_starts = 0;
     842             : 
     843             :         /*
     844             :          * Run STARTUP IPI loop.
     845             :          */
     846           3 :         pr_debug("#startup loops: %d\n", num_starts);
     847             : 
     848          12 :         for (j = 1; j <= num_starts; j++) {
     849           6 :                 pr_debug("Sending STARTUP #%d\n", j);
     850           6 :                 if (maxlvt > 3)              /* Due to the Pentium erratum 3AP.  */
     851           6 :                         apic_write(APIC_ESR, 0);
     852           6 :                 apic_read(APIC_ESR);
     853           6 :                 pr_debug("After apic_write\n");
     854             : 
     855             :                 /*
     856             :                  * STARTUP IPI
     857             :                  */
     858             : 
     859             :                 /* Target chip */
     860             :                 /* Boot on the stack */
     861             :                 /* Kick the second */
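                      :                 /*
                      :                  * The STARTUP IPI vector field holds the physical page
                      :                  * number of the trampoline, hence start_eip >> 12.
                      :                  */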
     862           6 :                 apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
     863             :                                phys_apicid);
     864             : 
     865             :                 /*
     866             :                  * Give the other CPU some time to accept the IPI.
     867             :                  */
     868           6 :                 if (init_udelay == 0)
     869           6 :                         udelay(10);
     870             :                 else
     871           0 :                         udelay(300);
     872             : 
     873           6 :                 pr_debug("Startup point 1\n");
     874             : 
     875           6 :                 pr_debug("Waiting for send to finish...\n");
     876           6 :                 send_status = safe_apic_wait_icr_idle();
     877             : 
     878             :                 /*
     879             :                  * Give the other CPU some time to accept the IPI.
     880             :                  */
     881           6 :                 if (init_udelay == 0)
     882           6 :                         udelay(10);
     883             :                 else
     884           0 :                         udelay(200);
     885             : 
     886           6 :                 if (maxlvt > 3)              /* Due to the Pentium erratum 3AP.  */
     887           6 :                         apic_write(APIC_ESR, 0);
     888           6 :                 accept_status = (apic_read(APIC_ESR) & 0xEF);
     889           6 :                 if (send_status || accept_status)
     890             :                         break;
     891             :         }
     892           3 :         pr_debug("After Startup\n");
     893             : 
     894           3 :         if (send_status)
     895           0 :                 pr_err("APIC never delivered???\n");
     896           3 :         if (accept_status)
     897           0 :                 pr_err("APIC delivery error (%lx)\n", accept_status);
     898             : 
     899           3 :         return (send_status | accept_status);
     900             : }
     901             : 
     902             : /* reduce the number of lines printed when booting a large cpu count system */
     903           3 : static void announce_cpu(int cpu, int apicid)
     904             : {
     905           3 :         static int current_node = NUMA_NO_NODE;
     906           3 :         int node = early_cpu_to_node(cpu);
     907           3 :         static int width, node_width;
     908             : 
     909           3 :         if (!width)
     910           1 :                 width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */
     911             : 
     912           3 :         if (!node_width)
     913           1 :                 node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */
     914             : 
     915           3 :         if (cpu == 1)
     916           1 :                 printk(KERN_INFO "x86: Booting SMP configuration:\n");
     917             : 
     918           3 :         if (system_state < SYSTEM_RUNNING) {
     919           3 :                 if (node != current_node) {
     920           1 :                         if (current_node > (-1))
     921           0 :                                 pr_cont("\n");
     922           1 :                         current_node = node;
     923             : 
     924           1 :                         printk(KERN_INFO ".... node %*s#%d, CPUs:  ",
     925           1 :                                node_width - num_digits(node), " ", node);
     926             :                 }
     927             : 
     928             :                 /* Add padding for the BSP */
     929           3 :                 if (cpu == 1)
     930           1 :                         pr_cont("%*s", width + 1, " ");
     931             : 
     932           3 :                 pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
     933             : 
     934             :         } else
     935           0 :                 pr_info("Booting Node %d Processor %d APIC 0x%x\n",
     936             :                         node, cpu, apicid);
     937           3 : }
     938             : 
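                      : /*
                      :  * NMI handler used to wake a soft-offlined CPU0: claim the NMI so it is
                      :  * not treated as an unknown NMI on the woken CPU.
                      :  */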
     939           0 : static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
     940             : {
     941           0 :         int cpu;
     942             : 
     943           0 :         cpu = smp_processor_id();
     944           0 :         if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
     945           0 :                 return NMI_HANDLED;
     946             : 
     947             :         return NMI_DONE;
     948             : }
     949             : 
     950             : /*
     951             :  * Wake up AP by INIT, INIT, STARTUP sequence.
     952             :  *
      953             :  * Instead of waiting for STARTUP after INITs, the BSP would execute the BIOS
      954             :  * boot-strap code, which is not the desired behavior when waking up the BSP.
      955             :  * To avoid the boot-strap code, wake up CPU0 by NMI instead.
     956             :  *
     957             :  * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
     958             :  * (i.e. physically hot removed and then hot added), NMI won't wake it up.
      959             :  * We'll change this code in the future to wake up hard offlined CPU0 if a
      960             :  * real platform and a request are available.
     961             :  */
     962             : static int
     963           3 : wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
     964             :                int *cpu0_nmi_registered)
     965             : {
     966           3 :         int id;
     967           3 :         int boot_error;
     968             : 
     969           3 :         preempt_disable();
     970             : 
     971             :         /*
     972             :          * Wake up AP by INIT, INIT, STARTUP sequence.
     973             :          */
     974           3 :         if (cpu) {
     975           3 :                 boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
     976           3 :                 goto out;
     977             :         }
     978             : 
     979             :         /*
      980             :          * Wake up the BSP by NMI.
      981             :          *
      982             :          * Register an NMI handler to help wake up CPU0.
     983             :          */
     984           0 :         boot_error = register_nmi_handler(NMI_LOCAL,
     985             :                                           wakeup_cpu0_nmi, 0, "wake_cpu0");
     986             : 
     987           0 :         if (!boot_error) {
     988           0 :                 enable_start_cpu0 = 1;
     989           0 :                 *cpu0_nmi_registered = 1;
     990           0 :                 id = apic->dest_mode_logical ? cpu0_logical_apicid : apicid;
     991           0 :                 boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
     992             :         }
     993             : 
     994           0 : out:
     995           3 :         preempt_enable();
     996             : 
     997           3 :         return boot_error;
     998             : }
     999             : 
    1000           3 : int common_cpu_up(unsigned int cpu, struct task_struct *idle)
    1001             : {
    1002           3 :         int ret;
    1003             : 
    1004             :         /* Just in case we booted with a single CPU. */
    1005           3 :         alternatives_enable_smp();
    1006             : 
    1007           3 :         per_cpu(current_task, cpu) = idle;
    1008           3 :         cpu_init_stack_canary(cpu, idle);
    1009             : 
    1010             :         /* Initialize the interrupt stack(s) */
    1011           3 :         ret = irq_init_percpu_irqstack(cpu);
    1012           3 :         if (ret)
    1013             :                 return ret;
    1014             : 
    1015             : #ifdef CONFIG_X86_32
    1016             :         /* Stack for startup_32 can be just as for start_secondary onwards */
    1017             :         per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
    1018             : #else
    1019           3 :         initial_gs = per_cpu_offset(cpu);
    1020             : #endif
    1021           3 :         return 0;
    1022             : }
    1023             : 
    1024             : /*
    1025             :  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
     1026             :  * (i.e. clustered apic addressing mode), this is a LOGICAL apic ID.
    1027             :  * Returns zero if CPU booted OK, else error code from
    1028             :  * ->wakeup_secondary_cpu.
    1029             :  */
    1030           3 : static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
    1031             :                        int *cpu0_nmi_registered)
    1032             : {
    1033             :         /* start_ip had better be page-aligned! */
    1034           3 :         unsigned long start_ip = real_mode_header->trampoline_start;
    1035             : 
    1036           3 :         unsigned long boot_error = 0;
    1037           3 :         unsigned long timeout;
    1038             : 
    1039           3 :         idle->thread.sp = (unsigned long)task_pt_regs(idle);
    1040           3 :         early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
    1041           3 :         initial_code = (unsigned long)start_secondary;
    1042           3 :         initial_stack  = idle->thread.sp;
    1043             : 
    1044             :         /* Enable the espfix hack for this CPU */
    1045           3 :         init_espfix_ap(cpu);
    1046             : 
    1047             :         /* So we see what's up */
    1048           3 :         announce_cpu(cpu, apicid);
    1049             : 
    1050             :         /*
    1051             :          * This grunge runs the startup process for
    1052             :          * the targeted processor.
    1053             :          */
    1054             : 
    1055           3 :         if (x86_platform.legacy.warm_reset) {
    1056             : 
    1057           3 :                 pr_debug("Setting warm reset code and vector.\n");
    1058             : 
    1059           3 :                 smpboot_setup_warm_reset_vector(start_ip);
    1060             :                 /*
    1061             :                  * Be paranoid about clearing APIC errors.
    1062             :                  */
    1063           3 :                 if (APIC_INTEGRATED(boot_cpu_apic_version)) {
    1064           3 :                         apic_write(APIC_ESR, 0);
    1065           3 :                         apic_read(APIC_ESR);
    1066             :                 }
    1067             :         }
    1068             : 
    1069             :         /*
    1070             :          * AP might wait on cpu_callout_mask in cpu_init() with
    1071             :          * cpu_initialized_mask set if previous attempt to online
    1072             :          * it timed-out. Clear cpu_initialized_mask so that after
    1073             :          * INIT/SIPI it could start with a clean state.
    1074             :          */
    1075           3 :         cpumask_clear_cpu(cpu, cpu_initialized_mask);
    1076           3 :         smp_mb();
    1077             : 
    1078             :         /*
    1079             :          * Wake up a CPU in two different ways:
    1080             :          * - Use the method in the APIC driver if it's defined.
    1081             :          * Otherwise:
    1082             :          * - Use an INIT boot APIC message for APs, or an NMI for the BSP.
    1083             :          */
    1084           3 :         if (apic->wakeup_secondary_cpu)
    1085           0 :                 boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
    1086             :         else
    1087           3 :                 boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
    1088             :                                                      cpu0_nmi_registered);
    1089             : 
    1090           3 :         if (!boot_error) {
    1091             :                 /*
    1092             :                  * Wait 10s total for the first sign of life from the AP.
    1093             :                  */
    1094           3 :                 boot_error = -1;
    1095           3 :                 timeout = jiffies + 10*HZ;
    1096          13 :                 while (time_before(jiffies, timeout)) {
    1097          13 :                         if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
    1098             :                                 /*
    1099             :                                  * Tell AP to proceed with initialization
    1100             :                                  */
    1101           3 :                                 cpumask_set_cpu(cpu, cpu_callout_mask);
    1102           3 :                                 boot_error = 0;
    1103           3 :                                 break;
    1104             :                         }
    1105          10 :                         schedule();
    1106             :                 }
    1107             :         }
    1108             : 
    1109           3 :         if (!boot_error) {
    1110             :                 /*
    1111             :                  * Wait till the AP completes its initial initialization.
    1112             :                  */
    1113         928 :                 while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
    1114             :                         /*
    1115             :                          * Allow other tasks to run while we wait for the
    1116             :                          * AP to come online. This also gives a chance
    1117             :                          * for the MTRR work (triggered by the AP coming online)
    1118             :                          * to be completed in the stop machine context.
    1119             :                          */
    1120         925 :                         schedule();
    1121             :                 }
    1122             :         }
    1123             : 
    1124           3 :         if (x86_platform.legacy.warm_reset) {
    1125             :                 /*
    1126             :                  * Cleanup possible dangling ends...
    1127             :                  */
    1128           3 :                 smpboot_restore_warm_reset_vector();
    1129             :         }
    1130             : 
    1131           3 :         return boot_error;
    1132             : }
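                     : /*
                     :  * A summary of the handshake driven above, derived from the cpumask
                     :  * operations in this file (cpu_init() and smp_callin() run on the AP):
                     :  *
                     :  *      BSP (do_boot_cpu)                    AP
                     :  *      -----------------                    --
                     :  *      INIT/SIPI (or NMI)          ---->    trampoline -> start_secondary()
                     :  *      wait cpu_initialized_mask   <----    cpu_init() marks itself initialized
                     :  *      set cpu_callout_mask        ---->    cpu_init() proceeds past its wait
                     :  *      wait cpu_callin_mask        <----    smp_callin() completes the call-in
                     :  */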
    1133             : 
    1134           3 : int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
    1135             : {
    1136           3 :         int apicid = apic->cpu_present_to_apicid(cpu);
    1137           3 :         int cpu0_nmi_registered = 0;
    1138           3 :         unsigned long flags;
    1139           3 :         int err, ret = 0;
    1140             : 
    1141           6 :         lockdep_assert_irqs_enabled();
    1142             : 
    1143           3 :         pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
    1144             : 
    1145           6 :         if (apicid == BAD_APICID ||
    1146           6 :             !physid_isset(apicid, phys_cpu_present_map) ||
    1147           3 :             !apic->apic_id_valid(apicid)) {
    1148           0 :                 pr_err("%s: bad cpu %d\n", __func__, cpu);
    1149           0 :                 return -EINVAL;
    1150             :         }
    1151             : 
    1152             :         /*
    1153             :          * Already booted CPU?
    1154             :          */
    1155           3 :         if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
    1156             :                 pr_debug("do_boot_cpu %d Already started\n", cpu);
    1157             :                 return -ENOSYS;
    1158             :         }
    1159             : 
    1160             :         /*
    1161             :          * Save current MTRR state in case it was changed since early boot
    1162             :          * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
    1163             :          */
    1164           3 :         mtrr_save_state();
    1165             : 
    1166             :         /* x86 CPUs take themselves offline, so delayed offline is OK. */
    1167           3 :         err = cpu_check_up_prepare(cpu);
    1168           3 :         if (err && err != -EBUSY)
    1169             :                 return err;
    1170             : 
    1171             :         /* the FPU context is blank, nobody can own it */
    1172           3 :         per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
    1173             : 
    1174           3 :         err = common_cpu_up(cpu, tidle);
    1175           3 :         if (err)
    1176             :                 return err;
    1177             : 
    1178           3 :         err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
    1179           3 :         if (err) {
    1180           0 :                 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
    1181           0 :                 ret = -EIO;
    1182           0 :                 goto unreg_nmi;
    1183             :         }
    1184             : 
    1185             :         /*
    1186             :          * Check TSC synchronization with the AP (keep irqs disabled
    1187             :          * while doing so):
    1188             :          */
    1189           6 :         local_irq_save(flags);
    1190           3 :         check_tsc_sync_source(cpu);
    1191           3 :         local_irq_restore(flags);
    1192             : 
    1193           3 :         while (!cpu_online(cpu)) {
    1194           0 :                 cpu_relax();
    1195           0 :                 touch_nmi_watchdog();
    1196             :         }
    1197             : 
    1198           3 : unreg_nmi:
    1199             :         /*
    1200             :          * Clean up the nmi handler. Do this after the callin and callout sync
    1201             :          * to avoid impact of possible long unregister time.
    1202             :          */
    1203           3 :         if (cpu0_nmi_registered)
    1204           0 :                 unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
    1205             : 
    1206             :         return ret;
    1207             : }
    1208             : 
    1209             : /**
    1210             :  * arch_disable_smp_support() - disables SMP support for x86 at runtime
    1211             :  */
    1212           0 : void arch_disable_smp_support(void)
    1213             : {
    1214           0 :         disable_ioapic_support();
    1215           0 : }
    1216             : 
    1217             : /*
    1218             :  * Fall back to non-SMP mode after errors.
    1219             :  *
    1220             :  * RED-PEN audit/test this more. I bet there is more state messed up here.
    1221             :  */
    1222           0 : static __init void disable_smp(void)
    1223             : {
    1224           0 :         pr_info("SMP disabled\n");
    1225             : 
    1226           0 :         disable_ioapic_support();
    1227             : 
    1228           0 :         init_cpu_present(cpumask_of(0));
    1229           0 :         init_cpu_possible(cpumask_of(0));
    1230             : 
    1231           0 :         if (smp_found_config)
    1232           0 :                 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
    1233             :         else
    1234           0 :                 physid_set_mask_of_physid(0, &phys_cpu_present_map);
    1235           0 :         cpumask_set_cpu(0, topology_sibling_cpumask(0));
    1236           0 :         cpumask_set_cpu(0, topology_core_cpumask(0));
    1237           0 :         cpumask_set_cpu(0, topology_die_cpumask(0));
    1238           0 : }
    1239             : 
    1240             : /*
    1241             :  * Various sanity checks.
    1242             :  */
    1243           1 : static void __init smp_sanity_check(void)
    1244             : {
    1245           1 :         preempt_disable();
    1246             : 
    1247             : #if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
    1248             :         if (def_to_bigsmp && nr_cpu_ids > 8) {
    1249             :                 unsigned int cpu;
    1250             :                 unsigned nr;
    1251             : 
    1252             :                 pr_warn("More than 8 CPUs detected - skipping them\n"
    1253             :                         "Use CONFIG_X86_BIGSMP\n");
    1254             : 
    1255             :                 nr = 0;
    1256             :                 for_each_present_cpu(cpu) {
    1257             :                         if (nr >= 8)
    1258             :                                 set_cpu_present(cpu, false);
    1259             :                         nr++;
    1260             :                 }
    1261             : 
    1262             :                 nr = 0;
    1263             :                 for_each_possible_cpu(cpu) {
    1264             :                         if (nr >= 8)
    1265             :                                 set_cpu_possible(cpu, false);
    1266             :                         nr++;
    1267             :                 }
    1268             : 
    1269             :                 nr_cpu_ids = 8;
    1270             :         }
    1271             : #endif
    1272             : 
    1273           1 :         if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
    1274           0 :                 pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
    1275             :                         hard_smp_processor_id());
    1276             : 
    1277           0 :                 physid_set(hard_smp_processor_id(), phys_cpu_present_map);
    1278             :         }
    1279             : 
    1280             :         /*
    1281             :          * Should not be necessary because the MP table should list the boot
    1282             :          * CPU too, but we do it for the sake of robustness anyway.
    1283             :          */
    1284           1 :         if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
    1285           0 :                 pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
    1286             :                           boot_cpu_physical_apicid);
    1287           0 :                 physid_set(hard_smp_processor_id(), phys_cpu_present_map);
    1288             :         }
    1289           1 :         preempt_enable();
    1290           1 : }
    1291             : 
    1292           1 : static void __init smp_cpu_index_default(void)
    1293             : {
    1294           1 :         int i;
    1295           1 :         struct cpuinfo_x86 *c;
    1296             : 
    1297           5 :         for_each_possible_cpu(i) {
    1298           4 :                 c = &cpu_data(i);
    1299             :                 /* mark all to hotplug */
    1300           4 :                 c->cpu_index = nr_cpu_ids;
    1301             :         }
    1302           1 : }
    1303             : 
    1304           1 : static void __init smp_get_logical_apicid(void)
    1305             : {
    1306           1 :         if (x2apic_mode)
    1307             :                 cpu0_logical_apicid = apic_read(APIC_LDR);
    1308             :         else
    1309           1 :                 cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
    1310           1 : }
    1311             : 
    1312             : /*
    1313             :  * Prepare for SMP bootup.
    1314             :  * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
    1315             :  *            for common interface support.
    1316             :  */
    1317           1 : void __init native_smp_prepare_cpus(unsigned int max_cpus)
    1318             : {
    1319           1 :         unsigned int i;
    1320             : 
    1321           1 :         smp_cpu_index_default();
    1322             : 
    1323             :         /*
    1324             :          * Setup boot CPU information
    1325             :          */
    1326           1 :         smp_store_boot_cpu_info(); /* Final full version of the data */
    1327           1 :         cpumask_copy(cpu_callin_mask, cpumask_of(0));
    1328           1 :         mb();
    1329             : 
    1330           6 :         for_each_possible_cpu(i) {
    1331           4 :                 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
    1332           4 :                 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
    1333           4 :                 zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
    1334           5 :                 zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
    1335             :         }
    1336             : 
    1337             :         /*
    1338             :          * Set 'default' x86 topology, this matches default_topology() in that
    1339             :          * it has NUMA nodes as a topology level. See also
    1340             :          * native_smp_cpus_done().
    1341             :          *
    1342             :  * Must be done before set_cpu_sibling_map() is run.
    1343             :          */
    1344           1 :         set_sched_topology(x86_topology);
    1345             : 
    1346           1 :         set_cpu_sibling_map(0);
    1347           1 :         init_freq_invariance(false, false);
    1348           1 :         smp_sanity_check();
    1349             : 
    1350           1 :         switch (apic_intr_mode) {
    1351           0 :         case APIC_PIC:
    1352             :         case APIC_VIRTUAL_WIRE_NO_CONFIG:
    1353           0 :                 disable_smp();
    1354           0 :                 return;
    1355           0 :         case APIC_SYMMETRIC_IO_NO_ROUTING:
    1356           0 :                 disable_smp();
    1357             :                 /* Setup local timer */
    1358           0 :                 x86_init.timers.setup_percpu_clockev();
    1359           0 :                 return;
    1360             :         case APIC_VIRTUAL_WIRE:
    1361             :         case APIC_SYMMETRIC_IO:
    1362             :                 break;
    1363             :         }
    1364             : 
    1365             :         /* Setup local timer */
    1366           1 :         x86_init.timers.setup_percpu_clockev();
    1367             : 
    1368           1 :         smp_get_logical_apicid();
    1369             : 
    1370           1 :         pr_info("CPU0: ");
    1371           1 :         print_cpu_info(&cpu_data(0));
    1372             : 
    1373           1 :         uv_system_init();
    1374             : 
    1375           1 :         set_mtrr_aps_delayed_init();
    1376             : 
    1377           1 :         smp_quirk_init_udelay();
    1378             : 
    1379           1 :         speculative_store_bypass_ht_init();
    1380             : }
    1381             : 
    1382           0 : void arch_thaw_secondary_cpus_begin(void)
    1383             : {
    1384           0 :         set_mtrr_aps_delayed_init();
    1385           0 : }
    1386             : 
    1387           0 : void arch_thaw_secondary_cpus_end(void)
    1388             : {
    1389           0 :         mtrr_aps_init();
    1390           0 : }
    1391             : 
    1392             : /*
    1393             :  * Early setup to make printk work.
    1394             :  */
    1395           1 : void __init native_smp_prepare_boot_cpu(void)
    1396             : {
    1397           1 :         int me = smp_processor_id();
    1398           1 :         switch_to_new_gdt(me);
    1399             :         /* already set me in cpu_online_mask in boot_cpu_init() */
    1400           1 :         cpumask_set_cpu(me, cpu_callout_mask);
    1401           1 :         cpu_set_state_online(me);
    1402           1 :         native_pv_lock_init();
    1403           1 : }
    1404             : 
    1405           1 : void __init calculate_max_logical_packages(void)
    1406             : {
    1407           1 :         int ncpus;
    1408             : 
    1409             :         /*
    1410             :          * Today neither Intel nor AMD supports heterogeneous systems, so
    1411             :          * extrapolate the boot cpu's data to all packages.
    1412             :          */
    1413           1 :         ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
    1414           1 :         __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
    1415           1 :         pr_info("Max logical packages: %u\n", __max_logical_packages);
    1416           1 : }
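                     : /*
                     :  * A worked instance of the computation above, for a hypothetical box with
                     :  * total_cpus = 16 and a boot CPU reporting 4 booted cores with 2 SMT
                     :  * threads each:
                     :  *
                     :  *      ncpus                  = 4 * 2 = 8
                     :  *      __max_logical_packages = DIV_ROUND_UP(16, 8) = 2
                     :  */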
    1417             : 
    1418           1 : void __init native_smp_cpus_done(unsigned int max_cpus)
    1419             : {
    1420           1 :         pr_debug("Boot done\n");
    1421             : 
    1422           1 :         calculate_max_logical_packages();
    1423             : 
    1424           1 :         if (x86_has_numa_in_package)
    1425           0 :                 set_sched_topology(x86_numa_in_package_topology);
    1426             : 
    1427           1 :         nmi_selftest();
    1428           1 :         impress_friends();
    1429           1 :         mtrr_aps_init();
    1430           1 : }
    1431             : 
    1432             : static int __initdata setup_possible_cpus = -1;
    1433           0 : static int __init _setup_possible_cpus(char *str)
    1434             : {
    1435           0 :         get_option(&str, &setup_possible_cpus);
    1436           0 :         return 0;
    1437             : }
    1438             : early_param("possible_cpus", _setup_possible_cpus);
    1439             : 
    1440             : 
    1441             : /*
    1442             :  * cpu_possible_mask should be static: it cannot change as CPUs are
    1443             :  * onlined or offlined. The reason is that per-cpu data structures
    1444             :  * are allocated by some modules at init time and cannot be resized
    1445             :  * dynamically on cpu arrival/departure.
    1446             :  * cpu_present_mask, on the other hand, can change dynamically.
    1447             :  * When cpu_hotplug is not compiled in, we fall back to the current
    1448             :  * behaviour, which is cpu_possible == cpu_present.
    1449             :  * - Ashok Raj
    1450             :  *
    1451             :  * Three ways to find out the number of additional hotplug CPUs:
    1452             :  * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
    1453             :  * - The user can override it with possible_cpus=NUM
    1454             :  * - Otherwise don't reserve additional CPUs.
    1455             :  * We do this because additional CPUs waste a lot of memory.
    1456             :  * -AK
    1457             :  */
    1458           1 : __init void prefill_possible_map(void)
    1459             : {
    1460           1 :         int i, possible;
    1461             : 
    1462             :         /* No boot processor was found in mptable or ACPI MADT */
    1463           1 :         if (!num_processors) {
    1464           0 :                 if (boot_cpu_has(X86_FEATURE_APIC)) {
    1465           0 :                         int apicid = boot_cpu_physical_apicid;
    1466           0 :                         int cpu = hard_smp_processor_id();
    1467             : 
    1468           0 :                         pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
    1469             : 
    1470             :                         /* Make sure boot cpu is enumerated */
    1471           0 :                         if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
    1472           0 :                             apic->apic_id_valid(apicid))
    1473           0 :                                 generic_processor_info(apicid, boot_cpu_apic_version);
    1474             :                 }
    1475             : 
    1476           0 :                 if (!num_processors)
    1477           0 :                         num_processors = 1;
    1478             :         }
    1479             : 
    1480           1 :         i = setup_max_cpus ?: 1;
    1481           1 :         if (setup_possible_cpus == -1) {
    1482           1 :                 possible = num_processors;
    1483             : #ifdef CONFIG_HOTPLUG_CPU
    1484           1 :                 if (setup_max_cpus)
    1485           1 :                         possible += disabled_cpus;
    1486             : #else
    1487             :                 if (possible > i)
    1488             :                         possible = i;
    1489             : #endif
    1490             :         } else
    1491             :                 possible = setup_possible_cpus;
    1492             : 
    1493           1 :         total_cpus = max_t(int, possible, num_processors + disabled_cpus);
    1494             : 
    1495             :         /* nr_cpu_ids could be reduced via nr_cpus= */
    1496           1 :         if (possible > nr_cpu_ids) {
    1497           0 :                 pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
    1498             :                         possible, nr_cpu_ids);
    1499           0 :                 possible = nr_cpu_ids;
    1500             :         }
    1501             : 
    1502             : #ifdef CONFIG_HOTPLUG_CPU
    1503           1 :         if (!setup_max_cpus)
    1504             : #endif
    1505           0 :         if (possible > i) {
    1506           0 :                 pr_warn("%d Processors exceeds max_cpus limit of %u\n",
    1507             :                         possible, setup_max_cpus);
    1508           0 :                 possible = i;
    1509             :         }
    1510             : 
    1511           1 :         nr_cpu_ids = possible;
    1512             : 
    1513           1 :         pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
    1514             :                 possible, max_t(int, possible - num_processors, 0));
    1515             : 
    1516           1 :         reset_cpu_possible_mask();
    1517             : 
    1518           5 :         for (i = 0; i < possible; i++)
    1519           4 :                 set_cpu_possible(i, true);
    1520           1 : }
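                     : /*
                     :  * A worked example of the sizing above, assuming CONFIG_HOTPLUG_CPU, a
                     :  * non-zero setup_max_cpus and no possible_cpus= or nr_cpus= override,
                     :  * with 4 enumerated and 2 BIOS-disabled CPUs (hypothetical numbers):
                     :  *
                     :  *      possible   = num_processors + disabled_cpus = 4 + 2 = 6
                     :  *      total_cpus = max(6, 4 + 2)                   = 6
                     :  *      nr_cpu_ids = 6
                     :  *
                     :  * and the boot log reads "Allowing 6 CPUs, 2 hotplug CPUs".
                     :  */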
    1521             : 
    1522             : #ifdef CONFIG_HOTPLUG_CPU
    1523             : 
    1524             : /* Recompute SMT state for all CPUs when a CPU goes offline */
    1525           0 : static void recompute_smt_state(void)
    1526             : {
    1527           0 :         int max_threads, cpu;
    1528             : 
    1529           0 :         max_threads = 0;
    1530           0 :         for_each_online_cpu (cpu) {
    1531           0 :                 int threads = cpumask_weight(topology_sibling_cpumask(cpu));
    1532             : 
    1533           0 :                 if (threads > max_threads)
    1534             :                         max_threads = threads;
    1535             :         }
    1536           0 :         __max_smt_threads = max_threads;
    1537           0 : }
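                     : /*
                     :  * For example, if one hyperthreaded core is still fully online
                     :  * (sibling-mask weight 2) while every other core is down to one thread,
                     :  * the loop above leaves __max_smt_threads = 2; it only drops to 1 once
                     :  * no online core has more than one sibling left.
                     :  */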
    1538             : 
    1539           0 : static void remove_siblinginfo(int cpu)
    1540             : {
    1541           0 :         int sibling;
    1542           0 :         struct cpuinfo_x86 *c = &cpu_data(cpu);
    1543             : 
    1544           0 :         for_each_cpu(sibling, topology_core_cpumask(cpu)) {
    1545           0 :                 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
    1546             :                 /*
    1547             :                  * last thread sibling in this cpu core going down
    1548             :                  */
    1549           0 :                 if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
    1550           0 :                         cpu_data(sibling).booted_cores--;
    1551             :         }
    1552             : 
    1553           0 :         for_each_cpu(sibling, topology_die_cpumask(cpu))
    1554           0 :                 cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
    1555           0 :         for_each_cpu(sibling, topology_sibling_cpumask(cpu))
    1556           0 :                 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
    1557           0 :         for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
    1558           0 :                 cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
    1559           0 :         cpumask_clear(cpu_llc_shared_mask(cpu));
    1560           0 :         cpumask_clear(topology_sibling_cpumask(cpu));
    1561           0 :         cpumask_clear(topology_core_cpumask(cpu));
    1562           0 :         cpumask_clear(topology_die_cpumask(cpu));
    1563           0 :         c->cpu_core_id = 0;
    1564           0 :         c->booted_cores = 0;
    1565           0 :         cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
    1566           0 :         recompute_smt_state();
    1567           0 : }
    1568             : 
    1569           0 : static void remove_cpu_from_maps(int cpu)
    1570             : {
    1571           0 :         set_cpu_online(cpu, false);
    1572           0 :         cpumask_clear_cpu(cpu, cpu_callout_mask);
    1573           0 :         cpumask_clear_cpu(cpu, cpu_callin_mask);
    1574             :         /* was set by cpu_init() */
    1575           0 :         cpumask_clear_cpu(cpu, cpu_initialized_mask);
    1576           0 :         numa_remove_cpu(cpu);
    1577           0 : }
    1578             : 
    1579           0 : void cpu_disable_common(void)
    1580             : {
    1581           0 :         int cpu = smp_processor_id();
    1582             : 
    1583           0 :         remove_siblinginfo(cpu);
    1584             : 
    1585             :         /* It's now safe to remove this processor from the online map */
    1586           0 :         lock_vector_lock();
    1587           0 :         remove_cpu_from_maps(cpu);
    1588           0 :         unlock_vector_lock();
    1589           0 :         fixup_irqs();
    1590           0 :         lapic_offline();
    1591           0 : }
    1592             : 
    1593           0 : int native_cpu_disable(void)
    1594             : {
    1595           0 :         int ret;
    1596             : 
    1597           0 :         ret = lapic_can_unplug_cpu();
    1598           0 :         if (ret)
    1599             :                 return ret;
    1600             : 
    1601           0 :         cpu_disable_common();
    1602             : 
    1603             :         /*
    1604             :          * Disable the local APIC. Otherwise IPI broadcasts will reach
    1605             :          * it. It still responds normally to INIT, NMI, SMI, and SIPI
    1606             :          * messages.
    1607             :          *
    1608             :          * Disabling the APIC must happen after cpu_disable_common()
    1609             :          * which invokes fixup_irqs().
    1610             :          *
    1611             :          * Disabling the APIC preserves already set bits in IRR, but
    1612             :          * an interrupt arriving after disabling the local APIC does not
    1613             :          * set the corresponding IRR bit.
    1614             :          *
    1615             :          * fixup_irqs() scans IRR for set bits so it can raise a not
    1616             :          * yet handled interrupt on the new destination CPU via an IPI
    1617             :          * but obviously it can't do so for IRR bits which are not set.
    1618             :          * IOW, interrupts arriving after disabling the local APIC will
    1619             :          * be lost.
    1620             :          */
    1621           0 :         apic_soft_disable();
    1622             : 
    1623           0 :         return 0;
    1624             : }
    1625             : 
    1626           0 : int common_cpu_die(unsigned int cpu)
    1627             : {
    1628           0 :         int ret = 0;
    1629             : 
    1630             :         /* We don't do anything here: idle task is faking death itself. */
    1631             : 
    1632             :         /* They ack this in play_dead() by setting CPU_DEAD */
    1633           0 :         if (cpu_wait_death(cpu, 5)) {
    1634           0 :                 if (system_state == SYSTEM_RUNNING)
    1635           0 :                         pr_info("CPU %u is now offline\n", cpu);
    1636             :         } else {
    1637           0 :                 pr_err("CPU %u didn't die...\n", cpu);
    1638           0 :                 ret = -1;
    1639             :         }
    1640             : 
    1641           0 :         return ret;
    1642             : }
    1643             : 
    1644           0 : void native_cpu_die(unsigned int cpu)
    1645             : {
    1646           0 :         common_cpu_die(cpu);
    1647           0 : }
    1648             : 
    1649           0 : void play_dead_common(void)
    1650             : {
    1651           0 :         idle_task_exit();
    1652             : 
    1653             :         /* Ack it */
    1654           0 :         (void)cpu_report_death();
    1655             : 
    1656             :         /*
    1657             :          * With physical CPU hotplug, we should halt the cpu
    1658             :          */
    1659           0 :         local_irq_disable();
    1660           0 : }
    1661             : 
    1662           0 : static bool wakeup_cpu0(void)
    1663             : {
    1664           0 :         if (smp_processor_id() == 0 && enable_start_cpu0)
    1665           0 :                 return true;
    1666             : 
    1667             :         return false;
    1668             : }
    1669             : 
    1670             : /*
    1671             :  * We need to flush the caches before going to sleep, lest we have
    1672             :  * dirty data in our caches when we come back up.
    1673             :  */
    1674           0 : static inline void mwait_play_dead(void)
    1675             : {
    1676           0 :         unsigned int eax, ebx, ecx, edx;
    1677           0 :         unsigned int highest_cstate = 0;
    1678           0 :         unsigned int highest_subcstate = 0;
    1679           0 :         void *mwait_ptr;
    1680           0 :         int i;
    1681             : 
    1682           0 :         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
    1683             :             boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
    1684             :                 return;
    1685           0 :         if (!this_cpu_has(X86_FEATURE_MWAIT))
    1686             :                 return;
    1687           0 :         if (!this_cpu_has(X86_FEATURE_CLFLUSH))
    1688             :                 return;
    1689           0 :         if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
    1690             :                 return;
    1691             : 
    1692           0 :         eax = CPUID_MWAIT_LEAF;
    1693           0 :         ecx = 0;
    1694           0 :         native_cpuid(&eax, &ebx, &ecx, &edx);
    1695             : 
    1696             :         /*
    1697             :          * eax will be 0 if the EDX enumeration is not valid.
    1698             :          * It is initialized below from the (cstate, sub_cstate) values when EDX is valid.
    1699             :          */
    1700           0 :         if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
    1701             :                 eax = 0;
    1702             :         } else {
    1703           0 :                 edx >>= MWAIT_SUBSTATE_SIZE;
    1704           0 :                 for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
    1705           0 :                         if (edx & MWAIT_SUBSTATE_MASK) {
    1706           0 :                                 highest_cstate = i;
    1707           0 :                                 highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
    1708             :                         }
    1709             :                 }
    1710           0 :                 eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
    1711           0 :                         (highest_subcstate - 1);
    1712             :         }
    1713             : 
    1714             :         /*
    1715             :          * This should be a memory location in a cache line which is
    1716             :          * unlikely to be touched by other processors.  The actual
    1717             :          * content is immaterial, as it is never modified in any way.
    1718             :          */
    1719           0 :         mwait_ptr = &current_thread_info()->flags;
    1720             : 
    1721           0 :         wbinvd();
    1722             : 
    1723           0 :         while (1) {
    1724             :                 /*
    1725             :                  * The CLFLUSH is a workaround for erratum AAI65 for
    1726             :                  * the Xeon 7400 series.  It's not clear it is actually
    1727             :                  * needed, but it should be harmless in either case.
    1728             :                  * The WBINVD is insufficient due to the spurious-wakeup
    1729             :                  * case where we return around the loop.
    1730             :                  */
    1731           0 :                 mb();
    1732           0 :                 clflush(mwait_ptr);
    1733           0 :                 mb();
    1734           0 :                 __monitor(mwait_ptr, 0, 0);
    1735           0 :                 mb();
    1736           0 :                 __mwait(eax, 0);
    1737             :                 /*
    1738             :                  * If NMI wants to wake up CPU0, start CPU0.
    1739             :                  */
    1740           0 :                 if (wakeup_cpu0())
    1741           0 :                         start_cpu0();
    1742             :         }
    1743             : }
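                     : /*
                     :  * A worked example of the MWAIT hint computed above. Suppose ECX
                     :  * advertises the extensions and CPUID.05H:EDX enumerates 2 C1, 2 C2 and
                     :  * 4 C3 sub-states, i.e. EDX = 0x00004220 (nibble 0 counts C0
                     :  * sub-states; hypothetical values). After the initial shift the loop
                     :  * settles on:
                     :  *
                     :  *      highest_cstate    = 2                   (the C3 nibble)
                     :  *      highest_subcstate = 4
                     :  *      eax               = (2 << 4) | (4 - 1) = 0x23
                     :  *
                     :  * which requests the deepest enumerated C-state from MWAIT.
                     :  */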
    1744             : 
    1745           0 : void hlt_play_dead(void)
    1746             : {
    1747           0 :         if (__this_cpu_read(cpu_info.x86) >= 4)
    1748           0 :                 wbinvd();
    1749             : 
    1750           0 :         while (1) {
    1751           0 :                 native_halt();
    1752             :                 /*
    1753             :                  * If NMI wants to wake up CPU0, start CPU0.
    1754             :                  */
    1755           0 :                 if (wakeup_cpu0())
    1756           0 :                         start_cpu0();
    1757             :         }
    1758             : }
    1759             : 
    1760           0 : void native_play_dead(void)
    1761             : {
    1762           0 :         play_dead_common();
    1763           0 :         tboot_shutdown(TB_SHUTDOWN_WFS);
    1764             : 
    1765           0 :         mwait_play_dead();      /* Only returns on failure */
    1766           0 :         if (cpuidle_play_dead())
    1767           0 :                 hlt_play_dead();
    1768             : }
    1769             : 
    1770             : #else /* ... !CONFIG_HOTPLUG_CPU */
    1771             : int native_cpu_disable(void)
    1772             : {
    1773             :         return -ENOSYS;
    1774             : }
    1775             : 
    1776             : void native_cpu_die(unsigned int cpu)
    1777             : {
    1778             :         /* We said "no" in __cpu_disable */
    1779             :         BUG();
    1780             : }
    1781             : 
    1782             : void native_play_dead(void)
    1783             : {
    1784             :         BUG();
    1785             : }
    1786             : 
    1787             : #endif
    1788             : 
    1789             : #ifdef CONFIG_X86_64
    1790             : /*
    1791             :  * APERF/MPERF frequency ratio computation.
    1792             :  *
    1793             :  * The scheduler wants to do frequency invariant accounting and needs a <1
    1794             :  * ratio to account for the 'current' frequency, corresponding to
    1795             :  * freq_curr / freq_max.
    1796             :  *
    1797             :  * Since the frequency freq_curr on x86 is controlled by a micro-controller and
    1798             :  * our P-state setting is little more than a request/hint, we need to observe
    1799             :  * the effective frequency 'BusyMHz', i.e. the average frequency over a time
    1800             :  * interval after discarding idle time. This is given by:
    1801             :  *
    1802             :  *   BusyMHz = delta_APERF / delta_MPERF * freq_base
    1803             :  *
    1804             :  * where freq_base is the max non-turbo P-state.
    1805             :  *
    1806             :  * The freq_max term has to be set to a somewhat arbitrary value, because we
    1807             :  * can't know which turbo states will be available at a given point in time:
    1808             :  * it all depends on the thermal headroom of the entire package. We set it to
    1809             :  * the turbo level with 4 cores active.
    1810             :  *
    1811             :  * Benchmarks show that's a good compromise between the 1C turbo ratio
    1812             :  * (freq_curr/freq_max would rarely reach 1) and something close to freq_base,
    1813             :  * which would ignore the entire turbo range (a conspicuous part, making
    1814             :  * freq_curr/freq_max always maxed out).
    1815             :  *
    1816             :  * An exception to the heuristic above is the Atom uarch, where we choose the
    1817             :  * highest turbo level for freq_max since Atoms are generally oriented towards
    1818             :  * power efficiency.
    1819             :  *
    1820             :  * Setting freq_max to anything less than the 1C turbo ratio makes the ratio
    1821             :  * freq_curr / freq_max to eventually grow >1, in which case we clip it to 1.
    1822             :  */
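                     : /*
                     :  * A worked instance of the BusyMHz formula above, with hypothetical
                     :  * counts. Over one tick, suppose delta_APERF = 18,000,000, delta_MPERF =
                     :  * 24,000,000 and freq_base = 2000 MHz:
                     :  *
                     :  *      BusyMHz = 18e6 / 24e6 * 2000 = 1500 MHz
                     :  *
                     :  * With freq_max set to a hypothetical 3000 MHz 4-core turbo level, the
                     :  * resulting freq_curr / freq_max ratio is 1500 / 3000 = 0.5.
                     :  */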
    1823             : 
    1824             : DEFINE_STATIC_KEY_FALSE(arch_scale_freq_key);
    1825             : 
    1826             : static DEFINE_PER_CPU(u64, arch_prev_aperf);
    1827             : static DEFINE_PER_CPU(u64, arch_prev_mperf);
    1828             : static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE;
    1829             : static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE;
    1830             : 
    1831           0 : void arch_set_max_freq_ratio(bool turbo_disabled)
    1832             : {
    1833           0 :         arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
    1834             :                                         arch_turbo_freq_ratio;
    1835           0 : }
    1836             : EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
    1837             : 
    1838           0 : static bool turbo_disabled(void)
    1839             : {
    1840           0 :         u64 misc_en;
    1841           0 :         int err;
    1842             : 
    1843           0 :         err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_en);
    1844           0 :         if (err)
    1845             :                 return false;
    1846             : 
    1847           0 :         return (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
    1848             : }
    1849             : 
    1850           0 : static bool slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
    1851             : {
    1852           0 :         int err;
    1853             : 
    1854           0 :         err = rdmsrl_safe(MSR_ATOM_CORE_RATIOS, base_freq);
    1855           0 :         if (err)
    1856             :                 return false;
    1857             : 
    1858           0 :         err = rdmsrl_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
    1859           0 :         if (err)
    1860             :                 return false;
    1861             : 
    1862           0 :         *base_freq = (*base_freq >> 16) & 0x3F;     /* max P state */
    1863           0 :         *turbo_freq = *turbo_freq & 0x3F;           /* 1C turbo    */
    1864             : 
    1865           0 :         return true;
    1866             : }
    1867             : 
    1868             : #include <asm/cpu_device_id.h>
    1869             : #include <asm/intel-family.h>
    1870             : 
    1871             : #define X86_MATCH(model)                                        \
    1872             :         X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,            \
    1873             :                 INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)
    1874             : 
    1875             : static const struct x86_cpu_id has_knl_turbo_ratio_limits[] = {
    1876             :         X86_MATCH(XEON_PHI_KNL),
    1877             :         X86_MATCH(XEON_PHI_KNM),
    1878             :         {}
    1879             : };
    1880             : 
    1881             : static const struct x86_cpu_id has_skx_turbo_ratio_limits[] = {
    1882             :         X86_MATCH(SKYLAKE_X),
    1883             :         {}
    1884             : };
    1885             : 
    1886             : static const struct x86_cpu_id has_glm_turbo_ratio_limits[] = {
    1887             :         X86_MATCH(ATOM_GOLDMONT),
    1888             :         X86_MATCH(ATOM_GOLDMONT_D),
    1889             :         X86_MATCH(ATOM_GOLDMONT_PLUS),
    1890             :         {}
    1891             : };
    1892             : 
    1893           0 : static bool knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
    1894             :                                 int num_delta_fratio)
    1895             : {
    1896           0 :         int fratio, delta_fratio, found;
    1897           0 :         int err, i;
    1898           0 :         u64 msr;
    1899             : 
    1900           0 :         err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
    1901           0 :         if (err)
    1902             :                 return false;
    1903             : 
    1904           0 :         *base_freq = (*base_freq >> 8) & 0xFF;            /* max P state */
    1905             : 
    1906           0 :         err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
    1907           0 :         if (err)
    1908             :                 return false;
    1909             : 
    1910           0 :         fratio = (msr >> 8) & 0xFF;
    1911           0 :         i = 16;
    1912           0 :         found = 0;
    1913           0 :         do {
    1914           0 :                 if (found >= num_delta_fratio) {
    1915           0 :                         *turbo_freq = fratio;
    1916           0 :                         return true;
    1917             :                 }
    1918             : 
    1919           0 :                 delta_fratio = (msr >> (i + 5)) & 0x7;
    1920             : 
    1921           0 :                 if (delta_fratio) {
    1922           0 :                         found += 1;
    1923           0 :                         fratio -= delta_fratio;
    1924             :                 }
    1925             : 
    1926           0 :                 i += 8;
    1927           0 :         } while (i < 64);
    1928             : 
    1929             :         return true;
    1930             : }
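                     : /*
                     :  * A worked decode of the loop above, with hypothetical MSR contents.
                     :  * Assume MSR_TURBO_RATIO_LIMIT reports a 1-core turbo ratio of 30 in
                     :  * bits 15:8 and the first delta field (bits 23:21, i.e. i = 16) holds 2.
                     :  * With num_delta_fratio = 1, the first iteration consumes that delta
                     :  * (fratio = 30 - 2 = 28) and the second iteration returns *turbo_freq =
                     :  * 28, the turbo limit with more cores active.
                     :  */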
    1931             : 
    1932           0 : static bool skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
    1933             : {
    1934           0 :         u64 ratios, counts;
    1935           0 :         u32 group_size;
    1936           0 :         int err, i;
    1937             : 
    1938           0 :         err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
    1939           0 :         if (err)
    1940             :                 return false;
    1941             : 
    1942           0 :         *base_freq = (*base_freq >> 8) & 0xFF;      /* max P state */
    1943             : 
    1944           0 :         err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
    1945           0 :         if (err)
    1946             :                 return false;
    1947             : 
    1948           0 :         err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
    1949           0 :         if (err)
    1950             :                 return false;
    1951             : 
    1952           0 :         for (i = 0; i < 64; i += 8) {
    1953           0 :                 group_size = (counts >> i) & 0xFF;
    1954           0 :                 if (group_size >= size) {
    1955           0 :                         *turbo_freq = (ratios >> i) & 0xFF;
    1956           0 :                         return true;
    1957             :                 }
    1958             :         }
    1959             : 
    1960             :         return false;
    1961             : }
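                     : /*
                     :  * A worked pass over the loop above, with hypothetical MSR contents.
                     :  * Suppose MSR_TURBO_RATIO_LIMIT1 encodes group sizes {2, 4, 8} in
                     :  * successive bytes and MSR_TURBO_RATIO_LIMIT the matching ratios
                     :  * {32, 30, 28}. A call with size = 4 skips the 2-core group (group_size
                     :  * 2 < 4) and returns the 4-core entry, *turbo_freq = 30.
                     :  */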
    1962             : 
    1963           0 : static bool core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
    1964             : {
    1965           0 :         u64 msr;
    1966           0 :         int err;
    1967             : 
    1968           0 :         err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
    1969           0 :         if (err)
    1970             :                 return false;
    1971             : 
    1972           0 :         err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
    1973           0 :         if (err)
    1974             :                 return false;
    1975             : 
    1976           0 :         *base_freq = (*base_freq >> 8) & 0xFF;    /* max P state */
    1977           0 :         *turbo_freq = (msr >> 24) & 0xFF;         /* 4C turbo    */
    1978             : 
    1979             :         /* The CPU may have fewer than 4 cores */
    1980           0 :         if (!*turbo_freq)
    1981           0 :                 *turbo_freq = msr & 0xFF;         /* 1C turbo    */
    1982             : 
    1983             :         return true;
    1984             : }
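                     : /*
                     :  * A worked example with hypothetical MSR contents. With
                     :  * MSR_PLATFORM_INFO bits 15:8 holding 0x14 and MSR_TURBO_RATIO_LIMIT =
                     :  * 0x1e1f2022, the code above yields *base_freq = 20 (max non-turbo
                     :  * P-state) and *turbo_freq = 0x1e = 30 (4-core turbo). The 1-core
                     :  * fallback (msr & 0xFF = 0x22) is used only when the 4-core byte is zero.
                     :  */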
    1985             : 
    1986           0 : static bool intel_set_max_freq_ratio(void)
    1987             : {
    1988           0 :         u64 base_freq, turbo_freq;
    1989           0 :         u64 turbo_ratio;
    1990             : 
    1991           0 :         if (slv_set_max_freq_ratio(&base_freq, &turbo_freq))
    1992           0 :                 goto out;
    1993             : 
    1994           0 :         if (x86_match_cpu(has_glm_turbo_ratio_limits) &&
    1995           0 :             skx_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
    1996           0 :                 goto out;
    1997             : 
    1998           0 :         if (x86_match_cpu(has_knl_turbo_ratio_limits) &&
    1999           0 :             knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
    2000           0 :                 goto out;
    2001             : 
    2002           0 :         if (x86_match_cpu(has_skx_turbo_ratio_limits) &&
    2003           0 :             skx_set_max_freq_ratio(&base_freq, &turbo_freq, 4))
    2004           0 :                 goto out;
    2005             : 
    2006           0 :         if (core_set_max_freq_ratio(&base_freq, &turbo_freq))
    2007           0 :                 goto out;
    2008             : 
    2009             :         return false;
    2010             : 
    2011           0 : out:
    2012             :         /*
    2013             :          * Some hypervisors advertise X86_FEATURE_APERFMPERF
    2014             :          * but then fill all MSRs with zeroes.
    2015             :          * Some CPUs have turbo boost but don't declare any turbo ratio
    2016             :          * in MSR_TURBO_RATIO_LIMIT.
    2017             :          */
    2018           0 :         if (!base_freq || !turbo_freq) {
    2019             :                 pr_debug("Couldn't determine cpu base or turbo frequency, necessary for scale-invariant accounting.\n");
    2020             :                 return false;
    2021             :         }
    2022             : 
    2023           0 :         turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq);
    2024           0 :         if (!turbo_ratio) {
    2025             :                 pr_debug("Non-zero turbo and base frequencies led to a 0 ratio.\n");
    2026             :                 return false;
    2027             :         }
    2028             : 
    2029           0 :         arch_turbo_freq_ratio = turbo_ratio;
    2030           0 :         arch_set_max_freq_ratio(turbo_disabled());
    2031             : 
    2032           0 :         return true;
    2033             : }
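                     : /*
                     :  * Continuing the hypothetical numbers from the example after
                     :  * core_set_max_freq_ratio(), base_freq = 20 and turbo_freq = 30 give
                     :  *
                     :  *      turbo_ratio = 30 * SCHED_CAPACITY_SCALE / 20 = 30 * 1024 / 20 = 1536
                     :  *
                     :  * so with turbo enabled arch_max_freq_ratio = 1536, and a CPU running
                     :  * flat out at base frequency scores 1024 / 1536 = 2/3 of freq_max.
                     :  */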
    2034             : 
    2035             : #ifdef CONFIG_ACPI_CPPC_LIB
    2036             : static bool amd_set_max_freq_ratio(void)
    2037             : {
    2038             :         struct cppc_perf_caps perf_caps;
    2039             :         u64 highest_perf, nominal_perf;
    2040             :         u64 perf_ratio;
    2041             :         int rc;
    2042             : 
    2043             :         rc = cppc_get_perf_caps(0, &perf_caps);
    2044             :         if (rc) {
    2045             :                 pr_debug("Could not retrieve perf counters (%d)\n", rc);
    2046             :                 return false;
    2047             :         }
    2048             : 
    2049             :         highest_perf = perf_caps.highest_perf;
    2050             :         nominal_perf = perf_caps.nominal_perf;
    2051             : 
    2052             :         if (!highest_perf || !nominal_perf) {
    2053             :                 pr_debug("Could not retrieve highest or nominal performance\n");
    2054             :                 return false;
    2055             :         }
    2056             : 
    2057             :         perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
    2058             :         /* midpoint between max_boost and max_P */
    2059             :         perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
    2060             :         if (!perf_ratio) {
    2061             :                 pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
    2062             :                 return false;
    2063             :         }
    2064             : 
    2065             :         arch_turbo_freq_ratio = perf_ratio;
    2066             :         arch_set_max_freq_ratio(false);
    2067             : 
    2068             :         return true;
    2069             : }
    2070             : #else
    2071             : static bool amd_set_max_freq_ratio(void)
    2072             : {
    2073             :         return false;
    2074             : }
    2075             : #endif
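                     : /*
                     :  * A worked example for the CPPC path above, with hypothetical values
                     :  * highest_perf = 228 and nominal_perf = 144:
                     :  *
                     :  *      perf_ratio = 228 * 1024 / 144   = 1621
                     :  *      perf_ratio = (1621 + 1024) >> 1 = 1322
                     :  *
                     :  * i.e. freq_max lands halfway between max boost and nominal, per the
                     :  * "midpoint between max_boost and max_P" comment.
                     :  */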
    2076             : 
    2077           0 : static void init_counter_refs(void)
    2078             : {
    2079           0 :         u64 aperf, mperf;
    2080             : 
    2081           0 :         rdmsrl(MSR_IA32_APERF, aperf);
    2082           0 :         rdmsrl(MSR_IA32_MPERF, mperf);
    2083             : 
    2084           0 :         this_cpu_write(arch_prev_aperf, aperf);
    2085           0 :         this_cpu_write(arch_prev_mperf, mperf);
    2086           0 : }
    2087             : 
    2088             : #ifdef CONFIG_PM_SLEEP
    2089             : static struct syscore_ops freq_invariance_syscore_ops = {
    2090             :         .resume = init_counter_refs,
    2091             : };
    2092             : 
    2093             : static void register_freq_invariance_syscore_ops(void)
    2094             : {
    2095             :         /* Bail out if registered already. */
    2096             :         if (freq_invariance_syscore_ops.node.prev)
    2097             :                 return;
    2098             : 
    2099             :         register_syscore_ops(&freq_invariance_syscore_ops);
    2100             : }
    2101             : #else
    2102           0 : static inline void register_freq_invariance_syscore_ops(void) {}
    2103             : #endif
    2104             : 
    2105           4 : static void init_freq_invariance(bool secondary, bool cppc_ready)
    2106             : {
    2107           4 :         bool ret = false;
    2108             : 
    2109           4 :         if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
    2110             :                 return;
    2111             : 
    2112           0 :         if (secondary) {
    2113           0 :                 if (static_branch_likely(&arch_scale_freq_key)) {
    2114           0 :                         init_counter_refs();
    2115             :                 }
    2116           0 :                 return;
    2117             :         }
    2118             : 
    2119           0 :         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
    2120           0 :                 ret = intel_set_max_freq_ratio();
    2121             :         else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
    2122             :                 if (!cppc_ready) {
    2123             :                         return;
    2124             :                 }
    2125             :                 ret = amd_set_max_freq_ratio();
    2126             :         }
    2127             : 
    2128           0 :         if (ret) {
    2129           0 :                 init_counter_refs();
    2130           0 :                 static_branch_enable(&arch_scale_freq_key);
    2131           0 :                 register_freq_invariance_syscore_ops();
    2132           0 :                 pr_info("Estimated ratio of average max frequency to base frequency (times 1024): %llu\n", arch_max_freq_ratio);
    2133             :         } else {
    2134             :                 pr_debug("Couldn't determine the max CPU frequency needed for scale-invariant accounting.\n");
    2135             :         }
    2136             : }
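
Note the division of labour in init_freq_invariance(): only the boot CPU (secondary == false) computes the max-frequency ratio and enables the arch_scale_freq_key static branch; secondary CPUs merely snapshot their own APERF/MPERF reference values, and only when the feature is already on. On AMD the ratio needs CPPC data, so the boot-CPU path returns early until cppc_ready is true and is retried via init_freq_invariance_cppc() below.
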
    2137             : 
    2138             : #ifdef CONFIG_ACPI_CPPC_LIB
    2139             : static DEFINE_MUTEX(freq_invariance_lock);
    2140             : 
    2141             : void init_freq_invariance_cppc(void)
    2142             : {
    2143             :         static bool secondary;
    2144             : 
    2145             :         mutex_lock(&freq_invariance_lock);
    2146             : 
    2147             :         init_freq_invariance(secondary, true);
    2148             :         secondary = true;
    2149             : 
    2150             :         mutex_unlock(&freq_invariance_lock);
    2151             : }
    2152             : #endif
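
Since CPPC data becomes available only once the ACPI processor devices have been probed, this wrapper re-enters init_freq_invariance() from the CPPC probe path: the mutex serializes concurrent probes, and the static secondary flag makes the first caller take the boot-CPU role while every later call is treated as a secondary CPU.
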
    2153             : 
    2154           0 : static void disable_freq_invariance_workfn(struct work_struct *work)
    2155             : {
    2156           0 :         static_branch_disable(&arch_scale_freq_key);
    2157           0 : }
    2158             : 
    2159             : static DECLARE_WORK(disable_freq_invariance_work,
    2160             :                     disable_freq_invariance_workfn);
    2161             : 
    2162             : DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
    2163             : 
    2164       75204 : void arch_scale_freq_tick(void)
    2165             : {
    2166       75204 :         u64 freq_scale = SCHED_CAPACITY_SCALE;
    2167       75204 :         u64 aperf, mperf;
    2168       75204 :         u64 acnt, mcnt;
    2169             : 
    2170       75204 :         if (!arch_scale_freq_invariant())
    2171       75142 :                 return;
    2172             : 
    2173           0 :         rdmsrl(MSR_IA32_APERF, aperf);
    2174           0 :         rdmsrl(MSR_IA32_MPERF, mperf);
    2175             : 
    2176           0 :         acnt = aperf - this_cpu_read(arch_prev_aperf);
    2177           0 :         mcnt = mperf - this_cpu_read(arch_prev_mperf);
    2178             : 
    2179           0 :         this_cpu_write(arch_prev_aperf, aperf);
    2180           0 :         this_cpu_write(arch_prev_mperf, mperf);
    2181             : 
    2182           0 :         if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
    2183           0 :                 goto error;
    2184             : 
    2185           0 :         if (check_mul_overflow(mcnt, arch_max_freq_ratio, &mcnt) || !mcnt)
    2186           0 :                 goto error;
    2187             : 
    2188           0 :         freq_scale = div64_u64(acnt, mcnt);
    2189           0 :         if (!freq_scale)
    2190           0 :                 goto error;
    2191             : 
    2192           0 :         if (freq_scale > SCHED_CAPACITY_SCALE)
    2193             :                 freq_scale = SCHED_CAPACITY_SCALE;
    2194             : 
    2195           0 :         this_cpu_write(arch_freq_scale, freq_scale);
    2196             :         return;
    2197             : 
    2198           0 : error:
    2199           0 :         pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
    2200           0 :         schedule_work(&disable_freq_invariance_work);
    2201             : }
    2202             : #else
    2203             : static inline void init_freq_invariance(bool secondary, bool cppc_ready)
    2204             : {
    2205             : }
    2206             : #endif /* CONFIG_X86_64 */
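
arch_scale_freq_tick() converts the per-tick APERF/MPERF deltas into the per-CPU capacity scale: freq_scale = (acnt << 2 * SCHED_CAPACITY_SHIFT) / (mcnt * arch_max_freq_ratio), which works out to 1024 * (current frequency / maximum frequency), clamped to SCHED_CAPACITY_SCALE. A minimal userspace sketch with made-up deltas (the kernel additionally guards the shift and multiply with check_shl_overflow()/check_mul_overflow(), omitted here):

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define SCHED_CAPACITY_SCALE (1ULL << SCHED_CAPACITY_SHIFT)

    int main(void)
    {
            uint64_t acnt = 2400000;         /* hypothetical APERF delta: 1.2x base clock */
            uint64_t mcnt = 2000000;         /* hypothetical MPERF delta (base-clock ticks) */
            uint64_t max_freq_ratio = 1280;  /* 1.25 * 1024, e.g. the CPPC midpoint above */

            /* 1024 * (acnt / mcnt) / (max_freq_ratio / 1024) */
            uint64_t freq_scale = (acnt << (2 * SCHED_CAPACITY_SHIFT)) /
                                  (mcnt * max_freq_ratio);
            if (freq_scale > SCHED_CAPACITY_SCALE)
                    freq_scale = SCHED_CAPACITY_SCALE;

            printf("freq_scale = %llu\n",
                   (unsigned long long)freq_scale);  /* prints 983, i.e. ~96% of max */
            return 0;
    }

Here 983/1024 is roughly 0.96: over that tick the CPU averaged about 96% of its estimated maximum frequency (1.2x base out of a 1.25x max).
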

Generated by: LCOV version 1.14