/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/percpu.h>

#include <asm/thread_info.h>
#include <asm/cpumask.h>

extern int smp_num_siblings;
extern unsigned int num_processors;

DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}

DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
#endif

struct task_struct;

struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	void (*stop_other_cpus)(int wait);
	void (*crash_stop_other_cpus)(void);
	void (*smp_send_reschedule)(int cpu);

	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};

/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
extern struct smp_ops smp_ops;

static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);
}

static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);
}

static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}

static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	return smp_ops.cpu_up(cpu, tidle);
}

static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}

static inline void play_dead(void)
{
	smp_ops.play_dead();
}

static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);
}

void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void calculate_max_logical_packages(void);
void native_smp_cpus_done(unsigned int max_cpus);
int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu);
void hlt_play_dead(void);
void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);

void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);

void smp_store_boot_cpu_info(void);
void smp_store_cpu_info(int id);

asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
__visible void smp_call_function_interrupt(struct pt_regs *regs);
__visible void smp_call_function_single_interrupt(struct pt_regs *r);

#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
#define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)

/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
#define raw_smp_processor_id()	this_cpu_read(cpu_number)
#define __smp_processor_id()	__this_cpu_read(cpu_number)

#ifdef CONFIG_X86_32
extern int safe_smp_processor_id(void);
#else
# define safe_smp_processor_id()	smp_processor_id()
#endif

#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu)	wbinvd()
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}
#endif /* CONFIG_SMP */

extern unsigned disabled_cpus;

#ifdef CONFIG_X86_LOCAL_APIC
extern int hard_smp_processor_id(void);

#else /* CONFIG_X86_LOCAL_APIC */
#define hard_smp_processor_id()	0
#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_DEBUG_NMI_SELFTEST
extern void nmi_selftest(void);
#else
#define nmi_selftest() do { } while (0)
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */
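
The smp_ops table above is the paravirt hook point: bare metal fills it with the native_* functions this header declares, while guests (e.g. Xen) can override individual callbacks. As a minimal sketch of how the pieces fit together, the initializer below mirrors the shape of the native instance in arch/x86/kernel/smp.c; the exact set of fields varies by kernel version, and native_stop_other_cpus() is defined in smp.c rather than declared in this header.

/*
 * Sketch of the native smp_ops instance (cf. arch/x86/kernel/smp.c).
 * Field names match struct smp_ops above; the implementations are the
 * native_* functions declared in this header, except where noted.
 */
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
	.smp_prepare_cpus	= native_smp_prepare_cpus,
	.smp_cpus_done		= native_smp_cpus_done,

	.stop_other_cpus	= native_stop_other_cpus,	/* defined in smp.c */
	.smp_send_reschedule	= native_smp_send_reschedule,

	.cpu_up			= native_cpu_up,
	.cpu_disable		= native_cpu_disable,
	.cpu_die		= native_cpu_die,
	.play_dead		= native_play_dead,

	.send_call_func_ipi	= native_send_call_func_ipi,
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
};

The inline wrappers in this header (smp_send_stop(), __cpu_up(), arch_send_call_function_ipi_mask(), and so on) then dispatch through this table, so a hypervisor only has to swap the function pointers to reroute the whole SMP path.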