/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECIAL_INSNS_H
#define _ASM_X86_SPECIAL_INSNS_H

#ifdef __KERNEL__

#include <asm/nops.h>
#include <asm/processor-flags.h>
#include <linux/irqflags.h>
#include <linux/jump_label.h>

/*
 * The compiler should not reorder volatile asm statements with respect to each
 * other: they should execute in program order. However, GCC 4.9.x and 5.x have
 * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
 * volatile asm. The write functions are not affected since they have memory
 * clobbers preventing reordering. To prevent reads from being reordered with
 * respect to writes, use a dummy memory operand.
 */

#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
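
/*
 * Illustrative note (not part of the original header): the dummy memory
 * operand makes every read asm below consume memory, so an affected
 * compiler cannot hoist a read above an earlier write, whose "memory"
 * clobber orders it against __FORCE_ORDER. Roughly:
 *
 *	native_write_cr0(val);		// has a "memory" clobber
 *	cr0 = native_read_cr0();	// reads __FORCE_ORDER, stays below
 */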

void native_write_cr0(unsigned long val);

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
	return val;
}

static __always_inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
	return val;
}

static __always_inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
}

static inline unsigned long __native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
#ifdef CONFIG_X86_32
	/*
	 * This could fault if CR4 does not exist. Non-existent CR4
	 * is functionally equivalent to CR4 == 0. Keep it simple and pretend
	 * that CR4 == 0 on CPUs that don't have CR4: the "0" (0) input
	 * constraint preloads %0 with zero, so if the read faults, the
	 * exception table fixup resumes at label 2 and val stays 0.
	 */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val) : "0" (0), __FORCE_ORDER);
#else
	/* CR4 always exists on x86_64. */
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
#endif
	return val;
}

void native_write_cr4(unsigned long val);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 rdpkru(void)
{
	u32 ecx = 0;
	u32 edx, pkru;

	/*
	 * "rdpkru" instruction. Places PKRU contents into EAX,
	 * clears EDX, and requires that ECX = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (ecx));
	return pkru;
}

static inline void wrpkru(u32 pkru)
{
	u32 ecx = 0, edx = 0;

	/*
	 * "wrpkru" instruction. Loads the contents of EAX into PKRU;
	 * requires that ECX = EDX = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(ecx), "d"(edx));
}

static inline void __write_pkru(u32 pkru)
{
	/*
	 * WRPKRU is relatively expensive compared to RDPKRU.
	 * Avoid WRPKRU when it would not change the value.
	 */
	if (pkru == rdpkru())
		return;

	wrpkru(pkru);
}
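
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * hypothetical): PKRU holds two bits per protection key, access-disable
 * (AD) at bit 2*pkey and write-disable (WD) at bit 2*pkey + 1. Making a
 * pkey read-only could look roughly like this:
 */
static inline void example_pkey_read_only(int pkey)
{
	u32 pkru = rdpkru();

	pkru &= ~(1u << (2 * pkey));		/* clear AD: allow reads */
	pkru |= 1u << (2 * pkey + 1);		/* set WD: deny writes */
	__write_pkru(pkru);
}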

#else
static inline u32 rdpkru(void)
{
	return 0;
}

static inline void __write_pkru(u32 pkru)
{
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

extern asmlinkage void asm_load_gs_index(unsigned int selector);

static inline void native_load_gs_index(unsigned int selector)
{
	unsigned long flags;

	/*
	 * Loading the GS selector also replaces the GS base, and
	 * asm_load_gs_index() temporarily swaps in the user GS base around
	 * the load, so interrupts must stay off for the duration.
	 */
	local_irq_save(flags);
	asm_load_gs_index(selector);
	local_irq_restore(flags);
}

static inline unsigned long __read_cr4(void)
{
	return native_read_cr4();
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}

static __always_inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}

static __always_inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}

/*
 * Careful! CR3 contains more than just an address. You probably want
 * read_cr3_pa() instead.
 */
static inline unsigned long __read_cr3(void)
{
	return __native_read_cr3();
}
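
/*
 * Illustrative note (not part of the original header): CR3 also carries
 * PCID and flag bits, so code that wants the page-table base masks them
 * off, which is roughly what read_cr3_pa() in <asm/processor.h> does:
 *
 *	unsigned long pa = __read_cr3() & CR3_ADDR_MASK;
 */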

static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}

static inline void __write_cr4(unsigned long x)
{
	native_write_cr4(x);
}

static inline void wbinvd(void)
{
	native_wbinvd();
}

#ifdef CONFIG_X86_64

static inline void load_gs_index(unsigned int selector)
{
	native_load_gs_index(selector);
}

#endif

#endif /* CONFIG_PARAVIRT_XXL */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

static inline void clflushopt(volatile void *__p)
{
	alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
		       ".byte 0x66; clflush %P0",
		       X86_FEATURE_CLFLUSHOPT,
		       "+m" (*(volatile char __force *)__p));
}

static inline void clwb(volatile void *__p)
{
	volatile struct { char x[64]; } *p = __p;

	asm volatile(ALTERNATIVE_2(
		".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
		".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
		X86_FEATURE_CLFLUSHOPT,
		".byte 0x66, 0x0f, 0xae, 0x30", /* clwb (%%rax) */
		X86_FEATURE_CLWB)
		: [p] "+m" (*p)
		: [pax] "a" (p));
}
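
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * hypothetical): writing back a buffer one cache line at a time. "line" is
 * the flush granularity, e.g. boot_cpu_data.x86_clflush_size; any required
 * ordering for persistence (e.g. a following sfence) is the caller's job.
 */
static inline void example_clwb_range(void *addr, size_t size,
				      unsigned int line)
{
	void *p;

	for (p = addr; p < addr + size; p += line)
		clwb(p);
}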

#define nop() asm volatile ("nop")

static inline void serialize(void)
{
	/* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */
	asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
}
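
/*
 * Illustrative note (not part of the original header): sync_core()-style
 * code can prefer this instruction when the CPU advertises it:
 *
 *	if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
 *		serialize();
 *		return;
 *	}
 *	(otherwise fall back to another serializing operation, e.g. IRET)
 */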

/* The dst parameter must be 64-byte aligned */
static inline void movdir64b(void __iomem *dst, const void *src)
{
	const struct { char _[64]; } *__src = src;
	struct { char _[64]; } __iomem *__dst = dst;

	/*
	 * MOVDIR64B %(rdx), rax.
	 *
	 * Both __src and __dst must be memory constraints in order to tell the
	 * compiler that no other memory accesses should be reordered around
	 * this one.
	 *
	 * Also, both must be supplied as lvalues because this tells the
	 * compiler what object (and what size of object) the instruction
	 * accesses: not the pointers, but what they point to, thus the
	 * deref'ing '*'.
	 */
	asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
		     : "+m" (*__dst)
		     : "m" (*__src), "a" (__dst), "d" (__src));
}
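
/*
 * Illustrative sketch (not part of the original header; wq_portal and desc
 * are hypothetical): posting a 64-byte descriptor to a device's MMIO
 * submission portal, e.g. an idxd work queue:
 *
 *	struct { char bytes[64]; } __aligned(64) desc;
 *
 *	...fill in desc...
 *	movdir64b(wq_portal, &desc);	// wq_portal: 64-byte-aligned MMIO
 */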

/**
 * enqcmds - Enqueue a command in supervisor (CPL0) mode
 * @dst: destination, in MMIO space (must be 512-bit aligned)
 * @src: 512-bit memory operand
 *
 * The ENQCMDS instruction allows software to write a 512-bit command to
 * a 512-bit-aligned special MMIO region that supports the instruction.
 * A return status is loaded into the ZF flag in the RFLAGS register.
 * ZF = 0 equates to success, and ZF = 1 indicates retry or error.
 *
 * This function issues the ENQCMDS instruction to submit data from
 * kernel space to MMIO space, in a unit of 512 bits. Order of data access
 * is not guaranteed, nor is a memory barrier performed afterwards. It
 * returns 0 on success and -EAGAIN on failure.
 *
 * Warning: Do not use this helper unless your driver has checked that the
 * ENQCMDS instruction is supported on the platform and the device accepts
 * ENQCMDS.
 */
static inline int enqcmds(void __iomem *dst, const void *src)
{
	const struct { char _[64]; } *__src = src;
	struct { char _[64]; } __iomem *__dst = dst;
	int zf;

	/*
	 * ENQCMDS %(rdx), rax
	 *
	 * See movdir64b()'s comment on operand specification.
	 */
	asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90"
		     CC_SET(z)
		     : CC_OUT(z) (zf), "+m" (*__dst)
		     : "m" (*__src), "a" (__dst), "d" (__src));

	/* Submission failure is indicated via EFLAGS.ZF=1 */
	if (zf)
		return -EAGAIN;

	return 0;
}
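
/*
 * Illustrative helper (not part of the original header; the name and the
 * retry count are hypothetical): since a ZF=1 status maps to -EAGAIN,
 * callers typically retry submission a bounded number of times:
 */
static inline int example_enqcmds_retry(void __iomem *portal,
					const void *desc, int retries)
{
	int ret;

	do {
		ret = enqcmds(portal, desc);
	} while (ret == -EAGAIN && --retries > 0);

	return ret;
}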

#endif /* __KERNEL__ */

#endif /* _ASM_X86_SPECIAL_INSNS_H */