Line data Source code
   1      : // SPDX-License-Identifier: GPL-2.0-only
   2      : /*
   3      :  * x86 FPU boot time init code:
   4      :  */
   5      : #include <asm/fpu/internal.h>
   6      : #include <asm/tlbflush.h>
   7      : #include <asm/setup.h>
   8      :
   9      : #include <linux/sched.h>
  10      : #include <linux/sched/task.h>
  11      : #include <linux/init.h>
  12      :
  13      : /*
  14      :  * Initialize the registers found in all CPUs, CR0 and CR4:
  15      :  */
  16    5 : static void fpu__init_cpu_generic(void)
  17      : {
  18    5 :         unsigned long cr0;
  19    5 :         unsigned long cr4_mask = 0;
  20      :
  21    5 :         if (boot_cpu_has(X86_FEATURE_FXSR))
  22    5 :                 cr4_mask |= X86_CR4_OSFXSR;
  23    5 :         if (boot_cpu_has(X86_FEATURE_XMM))
  24    5 :                 cr4_mask |= X86_CR4_OSXMMEXCPT;
  25    5 :         if (cr4_mask)
  26    5 :                 cr4_set_bits(cr4_mask);
  27      :
  28    5 :         cr0 = read_cr0();
  29    5 :         cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
  30    5 :         if (!boot_cpu_has(X86_FEATURE_FPU))
  31      :                 cr0 |= X86_CR0_EM;
  32    5 :         write_cr0(cr0);
  33      :
  34      :         /* Flush out any pending x87 state: */
  35      : #ifdef CONFIG_MATH_EMULATION
  36      :         if (!boot_cpu_has(X86_FEATURE_FPU))
  37      :                 fpstate_init_soft(&current->thread.fpu.state.soft);
  38      :         else
  39      : #endif
  40    5 :                 asm volatile ("fninit");
  41    5 : }
  42      :
  43      : /*
  44      :  * Enable all supported FPU features. Called when a CPU is brought online:
  45      :  */
  46    5 : void fpu__init_cpu(void)
  47      : {
  48    4 :         fpu__init_cpu_generic();
  49    5 :         fpu__init_cpu_xstate();
  50    4 : }
  51      :
  52    0 : static bool fpu__probe_without_cpuid(void)
  53      : {
  54    0 :         unsigned long cr0;
  55    0 :         u16 fsw, fcw;
  56      :
  57    0 :         fsw = fcw = 0xffff;
  58      :
  59    0 :         cr0 = read_cr0();
  60    0 :         cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
  61    0 :         write_cr0(cr0);
  62      :
  63    0 :         asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw));
  64      :
  65    0 :         pr_info("x86/fpu: Probing for FPU: FSW=0x%04hx FCW=0x%04hx\n", fsw, fcw);
  66      :
  67    0 :         return fsw == 0 && (fcw & 0x103f) == 0x003f;
  68      : }
  69      :
  70    1 : static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
  71      : {
  72    1 :         if (!boot_cpu_has(X86_FEATURE_CPUID) &&
  73    0 :             !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
  74    0 :                 if (fpu__probe_without_cpuid())
  75    0 :                         setup_force_cpu_cap(X86_FEATURE_FPU);
  76      :                 else
  77    0 :                         setup_clear_cpu_cap(X86_FEATURE_FPU);
  78      :         }
  79      :
  80      : #ifndef CONFIG_MATH_EMULATION
  81    1 :         if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_FPU)) {
  82    0 :                 pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
  83    0 :                 for (;;)
  84    0 :                         asm volatile("hlt");
  85      :         }
  86      : #endif
  87    1 : }
  88      :
  89      : /*
  90      :  * Boot time FPU feature detection code:
  91      :  */
  92      : unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
  93      : EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
  94      :
  95    1 : static void __init fpu__init_system_mxcsr(void)
  96      : {
  97    1 :         unsigned int mask = 0;
  98      :
  99    1 :         if (boot_cpu_has(X86_FEATURE_FXSR)) {
 100      :                 /* Static because GCC does not get 16-byte stack alignment right: */
 101    1 :                 static struct fxregs_state fxregs __initdata;
 102      :
 103    1 :                 asm volatile("fxsave %0" : "+m" (fxregs));
 104      :
 105    1 :                 mask = fxregs.mxcsr_mask;
 106      :
 107      :                 /*
 108      :                  * If zero then use the default features mask,
 109      :                  * which has all features set, except the
 110      :                  * denormals-are-zero feature bit:
 111      :                  */
 112    1 :                 if (mask == 0)
 113    0 :                         mask = 0x0000ffbf;
 114      :         }
 115    1 :         mxcsr_feature_mask &= mask;
 116    1 : }
 117      :
 118      : /*
 119      :  * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
 120      :  */
 121    1 : static void __init fpu__init_system_generic(void)
 122      : {
 123      :         /*
 124      :          * Set up the legacy init FPU context. (xstate init might overwrite this
 125      :          * with a more modern format, if the CPU supports it.)
 126      :          */
 127    1 :         fpstate_init(&init_fpstate);
 128      :
 129    1 :         fpu__init_system_mxcsr();
 130    1 : }
 131      :
 132      : /*
 133      :  * Size of the FPU context state. All tasks in the system use the
 134      :  * same context size, regardless of what portion they use.
 135      :  * This is inherent to the XSAVE architecture which puts all state
 136      :  * components into a single, continuous memory block:
 137      :  */
 138      : unsigned int fpu_kernel_xstate_size;
 139      : EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
 140      :
 141      : /* Get alignment of the TYPE. */
 142      : #define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
 143      :
 144      : /*
 145      :  * Enforce that 'MEMBER' is the last field of 'TYPE'.
 146      :  *
 147      :  * Align the computed size with alignment of the TYPE,
 148      :  * because that's how C aligns structs.
 149      :  */
 150      : #define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
 151      :         BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
 152      :                                            TYPE_ALIGN(TYPE)))
 153      :
 154      : /*
 155      :  * We append the 'struct fpu' to the task_struct:
 156      :  */
 157    1 : static void __init fpu__init_task_struct_size(void)
 158      : {
 159    1 :         int task_size = sizeof(struct task_struct);
 160      :
 161      :         /*
 162      :          * Subtract off the static size of the register state.
 163      :          * It potentially has a bunch of padding.
 164      :          */
 165    1 :         task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);
 166      :
 167      :         /*
 168      :          * Add back the dynamically-calculated register state
 169      :          * size.
 170      :          */
 171    1 :         task_size += fpu_kernel_xstate_size;
 172      :
 173      :         /*
 174      :          * We dynamically size 'struct fpu', so we require that
 175      :          * it be at the end of 'thread_struct' and that
 176      :          * 'thread_struct' be at the end of 'task_struct'. If
 177      :          * you hit a compile error here, check the structure to
 178      :          * see if something got added to the end.
 179      :          */
 180    1 :         CHECK_MEMBER_AT_END_OF(struct fpu, state);
 181    1 :         CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
 182    1 :         CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
 183      :
 184    1 :         arch_task_struct_size = task_size;
 185    1 : }
 186      :
 187      : /*
 188      :  * Set up the user and kernel xstate sizes based on the legacy FPU context size.
 189      :  *
 190      :  * We set this up first, and later it will be overwritten by
 191      :  * fpu__init_system_xstate() if the CPU knows about xstates.
 192      :  */
 193    1 : static void __init fpu__init_system_xstate_size_legacy(void)
 194      : {
 195    1 :         static int on_boot_cpu __initdata = 1;
 196      :
 197    1 :         WARN_ON_FPU(!on_boot_cpu);
 198    1 :         on_boot_cpu = 0;
 199      :
 200      :         /*
 201      :          * Note that xstate sizes might be overwritten later during
 202      :          * fpu__init_system_xstate().
 203      :          */
 204      :
 205    1 :         if (!boot_cpu_has(X86_FEATURE_FPU)) {
 206      :                 fpu_kernel_xstate_size = sizeof(struct swregs_state);
 207      :         } else {
 208    1 :                 if (boot_cpu_has(X86_FEATURE_FXSR))
 209    1 :                         fpu_kernel_xstate_size =
 210      :                                 sizeof(struct fxregs_state);
 211      :                 else
 212      :                         fpu_kernel_xstate_size =
 213      :                                 sizeof(struct fregs_state);
 214      :         }
 215      :
 216    1 :         fpu_user_xstate_size = fpu_kernel_xstate_size;
 217    1 : }
 218      :
 219      : /*
 220      :  * Find supported xfeatures based on cpu features and command-line input.
 221      :  * This must be called after fpu__init_parse_early_param() is called and
 222      :  * xfeatures_mask is enumerated.
 223      :  */
 224    1 : u64 __init fpu__get_supported_xfeatures_mask(void)
 225      : {
 226    1 :         return XFEATURE_MASK_USER_SUPPORTED |
 227      :                XFEATURE_MASK_SUPERVISOR_SUPPORTED;
 228      : }
 229      :
 230      : /* Legacy code to initialize eager fpu mode. */
 231    1 : static void __init fpu__init_system_ctx_switch(void)
 232      : {
 233    1 :         static bool on_boot_cpu __initdata = 1;
 234      :
 235    1 :         WARN_ON_FPU(!on_boot_cpu);
 236    1 :         on_boot_cpu = 0;
 237      : }
 238      :
 239      : /*
 240      :  * Called on the boot CPU once per system bootup, to set up the initial
 241      :  * FPU state that is later cloned into all processes:
 242      :  */
 243    1 : void __init fpu__init_system(struct cpuinfo_x86 *c)
 244      : {
 245    1 :         fpu__init_system_early_generic(c);
 246      :
 247      :         /*
 248      :          * The FPU has to be operational for some of the
 249      :          * later FPU init activities:
 250      :          */
 251    1 :         fpu__init_cpu();
 252      :
 253    1 :         fpu__init_system_generic();
 254    1 :         fpu__init_system_xstate_size_legacy();
 255    1 :         fpu__init_system_xstate();
 256    1 :         fpu__init_task_struct_size();
 257      :
 258    1 :         fpu__init_system_ctx_switch();
 259    1 : }
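
Note on the TYPE_ALIGN() / CHECK_MEMBER_AT_END_OF() macros (listing lines 141-152): offsetof() into an anonymous { char x; TYPE test; } wrapper yields the alignment of TYPE, and the build breaks unless the aligned end of MEMBER equals sizeof(TYPE), i.e. unless MEMBER really is the last field. The following is a minimal, standalone user-space sketch of that check, not kernel code: ALIGN_UP() and OFFSETOFEND() are local stand-ins for the kernel's ALIGN() and offsetofend(), and _Static_assert() stands in for BUILD_BUG_ON().

#include <stddef.h>
#include <stdio.h>

/* Local stand-ins for the kernel's ALIGN() and offsetofend() helpers: */
#define ALIGN_UP(x, a)            (((x) + (a) - 1) & ~((size_t)(a) - 1))
#define OFFSETOFEND(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Same trick as in the file: alignment of TYPE via a { char; TYPE; } wrapper: */
#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)

/* Compile-time "MEMBER is the last field of TYPE" check, BUILD_BUG_ON-style: */
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER)                            \
        _Static_assert(sizeof(TYPE) ==                                  \
                       ALIGN_UP(OFFSETOFEND(TYPE, MEMBER),              \
                                TYPE_ALIGN(TYPE)),                      \
                       #MEMBER " is not the last member of " #TYPE)

struct demo {
        char c;         /* 1 byte, followed by padding */
        long last;      /* final member                */
};

int main(void)
{
        CHECK_MEMBER_AT_END_OF(struct demo, last);      /* builds fine */
        /* CHECK_MEMBER_AT_END_OF(struct demo, c); would fail to compile. */

        printf("align=%zu end-of-last=%zu size=%zu\n",
               TYPE_ALIGN(struct demo),
               OFFSETOFEND(struct demo, last),
               sizeof(struct demo));
        return 0;
}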