LCOV - code coverage report
Current view: top level - arch/x86/kernel/apic - ipi.c (source / functions)
Test:         landlock.info
Date:         2021-04-22 12:43:58

                 Hit    Total    Coverage
Lines:            38       94      40.4 %
Functions:         8       16      50.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : 
       3             : #include <linux/cpumask.h>
       4             : #include <linux/smp.h>
       5             : #include <asm/io_apic.h>
       6             : 
       7             : #include "local.h"
       8             : 
       9             : DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);
      10             : 
      11             : #ifdef CONFIG_SMP
      12             : static int apic_ipi_shorthand_off __ro_after_init;
      13             : 
      14           0 : static __init int apic_ipi_shorthand(char *str)
      15             : {
      16           0 :         get_option(&str, &apic_ipi_shorthand_off);
      17           0 :         return 1;
      18             : }
      19             : __setup("no_ipi_broadcast=", apic_ipi_shorthand);
      20             : 
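For reference, this is the standard kernel boot-parameter pattern: booting with "no_ipi_broadcast=1" makes get_option() store 1 in apic_ipi_shorthand_off, which apic_smt_update() below then honors. A minimal sketch of the same pattern for a hypothetical option (my_option, my_flag and my_flag_setup are illustrative names, not part of this file):

        #include <linux/cache.h>
        #include <linux/init.h>
        #include <linux/kernel.h>

        static int my_flag __ro_after_init;

        /* Parses "my_option=<int>" from the kernel command line. */
        static int __init my_flag_setup(char *str)
        {
                get_option(&str, &my_flag);     /* parsed integer lands in my_flag */
                return 1;                       /* 1: option was handled */
        }
        __setup("my_option=", my_flag_setup);
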
      21           1 : static int __init print_ipi_mode(void)
      22             : {
      23           2 :         pr_info("IPI shorthand broadcast: %s\n",
      24             :                 apic_ipi_shorthand_off ? "disabled" : "enabled");
      25           1 :         return 0;
      26             : }
      27             : late_initcall(print_ipi_mode);
      28             : 
      29           4 : void apic_smt_update(void)
      30             : {
      31             :         /*
      32             :          * Do not switch to broadcast mode if:
      33             :          * - Disabled on the command line
      34             :          * - Only a single CPU is online
      35             :          * - Not all present CPUs have been at least booted once
      36             :          *
      37             :          * The latter is important as the local APIC might be in some
      38             :          * random state and a broadcast might cause havoc. That's
      39             :          * especially true for NMI broadcasting.
      40             :          */
      41           8 :         if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
      42           3 :             !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
      43           3 :                 static_branch_disable(&apic_use_ipi_shorthand);
      44             :         } else {
      45           1 :                 static_branch_enable(&apic_use_ipi_shorthand);
      46             :         }
      47           4 : }
      48             : 
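apic_smt_update() flips a static key rather than testing a plain flag, so the IPI fast paths below compile to a runtime-patched jump instead of a load-and-test. A minimal sketch of the jump-label pattern (my_fast_path, my_op and my_update are illustrative names):

        #include <linux/jump_label.h>
        #include <linux/printk.h>

        /* Defaults to false: the slow path runs until the key is enabled. */
        DEFINE_STATIC_KEY_FALSE(my_fast_path);

        static void my_op(void)
        {
                if (static_branch_likely(&my_fast_path))
                        pr_debug("fast path\n");  /* branch patched in at runtime */
                else
                        pr_debug("slow path\n");
        }

        /* Called from e.g. a CPU hotplug callback to flip the key: */
        static void my_update(bool fast)
        {
                if (fast)
                        static_branch_enable(&my_fast_path);
                else
                        static_branch_disable(&my_fast_path);
        }
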
      49           0 : void apic_send_IPI_allbutself(unsigned int vector)
      50             : {
      51           0 :         if (num_online_cpus() < 2)
      52             :                 return;
      53             : 
      54           0 :         if (static_branch_likely(&apic_use_ipi_shorthand))
      55           0 :                 apic->send_IPI_allbutself(vector);
      56             :         else
      57           0 :                 apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
      58             : }
      59             : 
      60             : /*
      61             :  * Send a 'reschedule' IPI to another CPU. It goes straight through and
      62             :  * wastes no time serializing anything. Worst case is that we lose a
      63             :  * reschedule ...
      64             :  */
      65        2541 : void native_smp_send_reschedule(int cpu)
      66             : {
      67        2541 :         if (unlikely(cpu_is_offline(cpu))) {
      68           0 :                 WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
      69           0 :                 return;
      70             :         }
      71        2541 :         apic->send_IPI(cpu, RESCHEDULE_VECTOR);
      72             : }
      73             : 
      74       11815 : void native_send_call_func_single_ipi(int cpu)
      75             : {
      76       11815 :         apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
      77       11816 : }
      78             : 
      79          62 : void native_send_call_func_ipi(const struct cpumask *mask)
      80             : {
      81          62 :         if (static_branch_likely(&apic_use_ipi_shorthand)) {
      82          62 :                 unsigned int cpu = smp_processor_id();
      83             : 
      84          62 :                 if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
      85          16 :                         goto sendmask;
      86             : 
      87          46 :                 if (cpumask_test_cpu(cpu, mask))
      88           0 :                         apic->send_IPI_all(CALL_FUNCTION_VECTOR);
      89          46 :                 else if (num_online_cpus() > 1)
      90          46 :                         apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
      91          46 :                 return;
      92             :         }
      93             : 
      94           0 : sendmask:
      95          16 :         apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
      96             : }
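To make the three branches above concrete, consider a hypothetical box with CPUs 0-3 online and the caller running on CPU 0:

        /*
         * mask = {0,1,2,3}: mask | {0} == online, 0 in mask     -> send_IPI_all()
         * mask = {1,2,3}:   mask | {0} == online, 0 not in mask -> send_IPI_allbutself()
         * mask = {1,2}:     mask | {0} != online                -> sendmask (send_IPI_mask())
         */
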
      97             : 
      98             : #endif /* CONFIG_SMP */
      99             : 
     100           0 : static inline int __prepare_ICR2(unsigned int mask)
     101             : {
     102           0 :         return SET_APIC_DEST_FIELD(mask);
     103             : }
     104             : 
     105             : static inline void __xapic_wait_icr_idle(void)
     106             : {
     107          46 :         while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
     108           0 :                 cpu_relax();
     109             : }
     110             : 
     111          46 : void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
     112             : {
     113             :         /*
     114             :          * Subtle. In the case of the 'never do double writes' workaround
     115             :          * we have to lock out interrupts to be safe.  As we don't care
      116             :          * about the value read we use an atomic rmw access to avoid costly
     117             :          * cli/sti.  Otherwise we use an even cheaper single atomic write
     118             :          * to the APIC.
     119             :          */
     120          46 :         unsigned int cfg;
     121             : 
     122             :         /*
     123             :          * Wait for idle.
     124             :          */
     125          46 :         if (unlikely(vector == NMI_VECTOR))
     126           0 :                 safe_apic_wait_icr_idle();
     127             :         else
     128             :                 __xapic_wait_icr_idle();
     129             : 
     130             :         /*
     131             :          * No need to touch the target chip field. Also the destination
     132             :          * mode is ignored when a shorthand is used.
     133             :          */
     134          46 :         cfg = __prepare_ICR(shortcut, vector, 0);
     135             : 
     136             :         /*
     137             :          * Send the IPI. The write to APIC_ICR fires this off.
     138             :          */
     139          46 :         native_apic_mem_write(APIC_ICR, cfg);
     140          46 : }
     141             : 
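__prepare_ICR() lives in the local "local.h" header; roughly, it ORs the shorthand, delivery mode and vector into a single ICR word. A simplified sketch of that composition (an approximation under that assumption; consult local.h for the authoritative version):

        static inline unsigned int prepare_icr_sketch(unsigned int shortcut,
                                                      int vector, unsigned int dest)
        {
                unsigned int icr = shortcut | dest;

                if (vector == NMI_VECTOR)
                        icr |= APIC_DM_NMI;             /* NMIs carry no vector number */
                else
                        icr |= APIC_DM_FIXED | vector;  /* fixed delivery + vector */

                return icr;
        }
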
     142             : /*
     143             :  * This is used to send an IPI with no shorthand notation (the destination is
     144             :  * specified in bits 56 to 63 of the ICR).
     145             :  */
     146           0 : void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
     147             : {
     148           0 :         unsigned long cfg;
     149             : 
     150             :         /*
     151             :          * Wait for idle.
     152             :          */
     153           0 :         if (unlikely(vector == NMI_VECTOR))
     154           0 :                 safe_apic_wait_icr_idle();
     155             :         else
     156             :                 __xapic_wait_icr_idle();
     157             : 
     158             :         /*
     159             :          * prepare target chip field
     160             :          */
     161           0 :         cfg = __prepare_ICR2(mask);
     162           0 :         native_apic_mem_write(APIC_ICR2, cfg);
     163             : 
     164             :         /*
     165             :          * program the ICR
     166             :          */
     167           0 :         cfg = __prepare_ICR(0, vector, dest);
     168             : 
     169             :         /*
     170             :          * Send the IPI. The write to APIC_ICR fires this off.
     171             :          */
     172           0 :         native_apic_mem_write(APIC_ICR, cfg);
     173           0 : }
     174             : 
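The destination byte mentioned above (bits 56 to 63 of the architectural 64-bit ICR) is the top byte of the ICR2 register, which is why __prepare_ICR2() is just a field shift. A worked example, assuming the usual SET_APIC_DEST_FIELD() definition from <asm/apicdef.h>:

        #include <asm/apicdef.h>        /* SET_APIC_DEST_FIELD(x): ((x) << 24) */

        /* Illustrative only: where APIC ID 5 lands in ICR2. */
        static inline unsigned int example_icr2_for_apicid(void)
        {
                return SET_APIC_DEST_FIELD(5);  /* == 0x05000000, bits 24-31 of ICR2 */
        }
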
     175           0 : void default_send_IPI_single_phys(int cpu, int vector)
     176             : {
     177           0 :         unsigned long flags;
     178             : 
     179           0 :         local_irq_save(flags);
     180           0 :         __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
     181             :                                       vector, APIC_DEST_PHYSICAL);
     182           0 :         local_irq_restore(flags);
     183           0 : }
     184             : 
     185           0 : void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
     186             : {
     187           0 :         unsigned long query_cpu;
     188           0 :         unsigned long flags;
     189             : 
     190             :         /*
     191             :          * Hack. The clustered APIC addressing mode doesn't allow us to send
     192             :          * to an arbitrary mask, so I do a unicast to each CPU instead.
     193             :          * - mbligh
     194             :          */
     195           0 :         local_irq_save(flags);
     196           0 :         for_each_cpu(query_cpu, mask) {
     197           0 :                 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
     198             :                                 query_cpu), vector, APIC_DEST_PHYSICAL);
     199             :         }
     200           0 :         local_irq_restore(flags);
     201           0 : }
     202             : 
     203           0 : void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
     204             :                                                  int vector)
     205             : {
     206           0 :         unsigned int this_cpu = smp_processor_id();
     207           0 :         unsigned int query_cpu;
     208           0 :         unsigned long flags;
     209             : 
     210             :         /* See Hack comment above */
     211             : 
     212           0 :         local_irq_save(flags);
     213           0 :         for_each_cpu(query_cpu, mask) {
     214           0 :                 if (query_cpu == this_cpu)
     215           0 :                         continue;
     216           0 :                 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
     217             :                                  query_cpu), vector, APIC_DEST_PHYSICAL);
     218             :         }
     219           0 :         local_irq_restore(flags);
     220           0 : }
     221             : 
     222             : /*
     223             :  * Helper function for APICs which insist on cpumasks
     224             :  */
     225       14356 : void default_send_IPI_single(int cpu, int vector)
     226             : {
     227       14356 :         apic->send_IPI_mask(cpumask_of(cpu), vector);
     228       14357 : }
     229             : 
     230          46 : void default_send_IPI_allbutself(int vector)
     231             : {
     232          46 :         __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
     233          46 : }
     234             : 
     235           0 : void default_send_IPI_all(int vector)
     236             : {
     237           0 :         __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
     238           0 : }
     239             : 
     240           0 : void default_send_IPI_self(int vector)
     241             : {
     242           0 :         __default_send_IPI_shortcut(APIC_DEST_SELF, vector);
     243           0 : }
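The three wrappers above map onto the ICR destination-shorthand field (bits 18-19 of the ICR); assuming the usual <asm/apicdef.h> values:

        /*
         * APIC_DEST_SELF   0x40000  shorthand 01: self only
         * APIC_DEST_ALLINC 0x80000  shorthand 10: all, including self
         * APIC_DEST_ALLBUT 0xC0000  shorthand 11: all but self
         */
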
     244             : 
     245             : #ifdef CONFIG_X86_32
     246             : 
     247             : void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
     248             :                                                  int vector)
     249             : {
     250             :         unsigned long flags;
     251             :         unsigned int query_cpu;
     252             : 
     253             :         /*
     254             :          * Hack. The clustered APIC addressing mode doesn't allow us to send
      255             :          * to an arbitrary mask, so I do a unicast to each CPU instead. This
     256             :          * should be modified to do 1 message per cluster ID - mbligh
     257             :          */
     258             : 
     259             :         local_irq_save(flags);
     260             :         for_each_cpu(query_cpu, mask)
     261             :                 __default_send_IPI_dest_field(
     262             :                         early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
     263             :                         vector, APIC_DEST_LOGICAL);
     264             :         local_irq_restore(flags);
     265             : }
     266             : 
     267             : void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
     268             :                                                  int vector)
     269             : {
     270             :         unsigned long flags;
     271             :         unsigned int query_cpu;
     272             :         unsigned int this_cpu = smp_processor_id();
     273             : 
     274             :         /* See Hack comment above */
     275             : 
     276             :         local_irq_save(flags);
     277             :         for_each_cpu(query_cpu, mask) {
     278             :                 if (query_cpu == this_cpu)
     279             :                         continue;
     280             :                 __default_send_IPI_dest_field(
     281             :                         early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
     282             :                         vector, APIC_DEST_LOGICAL);
      283             :         }
     284             :         local_irq_restore(flags);
     285             : }
     286             : 
     287             : /*
     288             :  * This is only used on smaller machines.
     289             :  */
     290             : void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
     291             : {
     292             :         unsigned long mask = cpumask_bits(cpumask)[0];
     293             :         unsigned long flags;
     294             : 
     295             :         if (!mask)
     296             :                 return;
     297             : 
     298             :         local_irq_save(flags);
     299             :         WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
     300             :         __default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
     301             :         local_irq_restore(flags);
     302             : }
     303             : 
     304             : /* must come after the send_IPI functions above for inlining */
     305             : static int convert_apicid_to_cpu(int apic_id)
     306             : {
     307             :         int i;
     308             : 
     309             :         for_each_possible_cpu(i) {
     310             :                 if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
     311             :                         return i;
     312             :         }
     313             :         return -1;
     314             : }
     315             : 
     316             : int safe_smp_processor_id(void)
     317             : {
     318             :         int apicid, cpuid;
     319             : 
     320             :         if (!boot_cpu_has(X86_FEATURE_APIC))
     321             :                 return 0;
     322             : 
     323             :         apicid = hard_smp_processor_id();
     324             :         if (apicid == BAD_APICID)
     325             :                 return 0;
     326             : 
     327             :         cpuid = convert_apicid_to_cpu(apicid);
     328             : 
     329             :         return cpuid >= 0 ? cpuid : 0;
     330             : }
     331             : #endif

Generated by: LCOV version 1.14