/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#else
#define __percpu_seg		fs
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU_VAR(var)	var
#endif	/* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr) \
({ \
	unsigned long tcp_ptr__; \
	asm volatile("add " __percpu_arg(1) ", %0" \
		     : "=r" (tcp_ptr__) \
		     : "m" (this_cpu_off), "0" (ptr)); \
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
})
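/*
 * Illustrative sketch (not part of the original header): arch_raw_cpu_ptr()
 * turns a per-cpu address into a pointer into the current CPU's per-cpu
 * area by adding this CPU's offset (this_cpu_off) with one segment-based
 * "add".  A hypothetical caller could look like this:
 *
 *	DEFINE_PER_CPU(struct foo, foo_slot);		// hypothetical variable
 *
 *	struct foo *f = raw_cpu_ptr(&foo_slot);		// ends up here
 *	f->count++;					// plain access afterwards
 */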
#else
#define __percpu_prefix		""
#endif

#define __percpu_arg(x)		__percpu_prefix "%" #x

/*
 * Initialized pointers to per-cpu variables for the boot processor
 * must use these macros to obtain the proper address offset from
 * __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif
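/*
 * Illustrative sketch (not part of the original header): on x86-64 SMP,
 * DECLARE_INIT_PER_CPU()/init_per_cpu_var() resolve to the init_per_cpu__
 * alias, which refers to the boot-time copy of the variable inside the
 * initial per-cpu area, roughly:
 *
 *	DECLARE_INIT_PER_CPU(example_var);	// hypothetical per-cpu variable
 *	// &init_per_cpu_var(example_var) names init_per_cpu__example_var,
 *	// usable before the real per-cpu areas are set up
 */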

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */

#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
#define __pcpu_type_4 u32
#define __pcpu_type_8 u64

#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val) ((u64)(val))

#define __pcpu_op1_1(op, dst) op "b " dst
#define __pcpu_op1_2(op, dst) op "w " dst
#define __pcpu_op1_4(op, dst) op "l " dst
#define __pcpu_op1_8(op, dst) op "q " dst

#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst

#define __pcpu_reg_1(mod, x) mod "q" (x)
#define __pcpu_reg_2(mod, x) mod "r" (x)
#define __pcpu_reg_4(mod, x) mod "r" (x)
#define __pcpu_reg_8(mod, x) mod "r" (x)

#define __pcpu_reg_imm_1(x) "qi" (x)
#define __pcpu_reg_imm_2(x) "ri" (x)
#define __pcpu_reg_imm_4(x) "ri" (x)
#define __pcpu_reg_imm_8(x) "re" (x)

#define percpu_to_op(size, qual, op, _var, _val) \
do { \
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \
	if (0) { \
		typeof(_var) pto_tmp__; \
		pto_tmp__ = (_val); \
		(void)pto_tmp__; \
	} \
	asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var])) \
	    : [var] "+m" (_var) \
	    : [val] __pcpu_reg_imm_##size(pto_val__)); \
} while (0)
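/*
 * Illustrative note (not part of the original header): the dead
 * "if (0) { ... }" block above never emits code; it only makes the
 * compiler check that _val is assignment-compatible with _var, e.g.:
 *
 *	DEFINE_PER_CPU(int, example_cnt);	// hypothetical variable
 *
 *	this_cpu_write(example_cnt, 5);		// OK: one mov to %gs:...
 *	this_cpu_write(example_cnt, &example_cnt); // warns: pointer vs. int
 */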

#define percpu_unary_op(size, qual, op, _var) \
({ \
	asm qual (__pcpu_op1_##size(op, __percpu_arg([var])) \
	    : [var] "+m" (_var)); \
})

/*
 * Generate a per-cpu add-to-memory instruction, and optimize the code
 * to use inc/dec when the constant 1 or -1 is added or subtracted.
 */
#define percpu_add_op(size, qual, var, val) \
do { \
	const int pao_ID__ = (__builtin_constant_p(val) && \
			      ((val) == 1 || (val) == -1)) ? \
				(int)(val) : 0; \
	if (0) { \
		typeof(var) pao_tmp__; \
		pao_tmp__ = (val); \
		(void)pao_tmp__; \
	} \
	if (pao_ID__ == 1) \
		percpu_unary_op(size, qual, "inc", var); \
	else if (pao_ID__ == -1) \
		percpu_unary_op(size, qual, "dec", var); \
	else \
		percpu_to_op(size, qual, "add", var, val); \
} while (0)
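/*
 * Illustrative sketch (not part of the original header): with the constant
 * folding above, +1/-1 use inc/dec and every other value a single add,
 * e.g. (hypothetical variable):
 *
 *	DEFINE_PER_CPU(unsigned long, example_events);
 *
 *	this_cpu_inc(example_events);		// roughly: incq %gs:example_events
 *	this_cpu_add(example_events, 16);	// roughly: addq $16, %gs:example_events
 */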

#define percpu_from_op(size, qual, op, _var) \
({ \
	__pcpu_type_##size pfo_val__; \
	asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \
	    : [val] __pcpu_reg_##size("=", pfo_val__) \
	    : [var] "m" (_var)); \
	(typeof(_var))(unsigned long) pfo_val__; \
})

#define percpu_stable_op(size, op, _var) \
({ \
	__pcpu_type_##size pfo_val__; \
	asm(__pcpu_op2_##size(op, __percpu_arg(P[var]), "%[val]") \
	    : [val] __pcpu_reg_##size("=", pfo_val__) \
	    : [var] "p" (&(_var))); \
	(typeof(_var))(unsigned long) pfo_val__; \
})

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val) \
({ \
	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val); \
	asm qual (__pcpu_op2_##size("xadd", "%[tmp]", \
				    __percpu_arg([var])) \
		  : [tmp] __pcpu_reg_##size("+", paro_tmp__), \
		    [var] "+m" (_var) \
		  : : "memory"); \
	(typeof(_var))(unsigned long) (paro_tmp__ + _val); \
})
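/*
 * Illustrative sketch (not part of the original header): xadd leaves the
 * old value behind, so the macro adds _val back in order to return the new
 * value. A hypothetical caller:
 *
 *	DEFINE_PER_CPU(int, example_seq);
 *
 *	int id = this_cpu_add_return(example_seq, 1);	// id is the new count
 */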

/*
 * xchg is implemented using cmpxchg without a lock prefix. A real xchg
 * is expensive due to its implied lock prefix, which also prevents the
 * processor from prefetching cachelines.
 */
#define percpu_xchg_op(size, qual, _var, _nval) \
({ \
	__pcpu_type_##size pxo_old__; \
	__pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval); \
	asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), \
				    "%[oval]") \
		  "\n1:\t" \
		  __pcpu_op2_##size("cmpxchg", "%[nval]", \
				    __percpu_arg([var])) \
		  "\n\tjnz 1b" \
		  : [oval] "=&a" (pxo_old__), \
		    [var] "+m" (_var) \
		  : [nval] __pcpu_reg_##size(, pxo_new__) \
		  : "memory"); \
	(typeof(_var))(unsigned long) pxo_old__; \
})

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \
({ \
	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval); \
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \
				    __percpu_arg([var])) \
		  : [oval] "+a" (pco_old__), \
		    [var] "+m" (_var) \
		  : [nval] __pcpu_reg_##size(, pco_new__) \
		  : "memory"); \
	(typeof(_var))(unsigned long) pco_old__; \
})
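/*
 * Illustrative sketch (not part of the original header): the per-cpu
 * cmpxchg returns the previous value, so a caller can tell whether its
 * update was applied (hypothetical variable):
 *
 *	DEFINE_PER_CPU(unsigned int, example_state);
 *
 *	if (this_cpu_cmpxchg(example_state, 0, 1) == 0)
 *		;	// we moved example_state from 0 to 1 on this CPU
 */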

/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed, while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across CPUs. The current users include
 * get_current() and get_thread_info(), both of which are actually
 * per-thread variables implemented as per-CPU variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable_1(pcp)	percpu_stable_op(1, "mov", pcp)
#define this_cpu_read_stable_2(pcp)	percpu_stable_op(2, "mov", pcp)
#define this_cpu_read_stable_4(pcp)	percpu_stable_op(4, "mov", pcp)
#define this_cpu_read_stable_8(pcp)	percpu_stable_op(8, "mov", pcp)
#define this_cpu_read_stable(pcp)	__pcpu_size_call_return(this_cpu_read_stable_, pcp)
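/*
 * Illustrative sketch (not part of the original header): the stable form
 * lets the compiler cache the loaded value across accesses, which is how
 * get_current() is typically built on top of the per-cpu current_task
 * variable (the real helper lives in asm/current.h):
 *
 *	static __always_inline struct task_struct *get_current_sketch(void)
 *	{
 *		return this_cpu_read_stable(current_task);
 *	}
 */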

#define raw_cpu_read_1(pcp)		percpu_from_op(1, , "mov", pcp)
#define raw_cpu_read_2(pcp)		percpu_from_op(2, , "mov", pcp)
#define raw_cpu_read_4(pcp)		percpu_from_op(4, , "mov", pcp)

#define raw_cpu_write_1(pcp, val)	percpu_to_op(1, , "mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)	percpu_to_op(2, , "mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)	percpu_to_op(4, , "mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)		percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val)		percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val)		percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val)		percpu_to_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val)		percpu_to_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val)		percpu_to_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val)		percpu_to_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val)		percpu_to_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val)		percpu_to_op(4, , "or", (pcp), val)

/*
 * raw_cpu_xchg() can use a load-store since it is not required to be
 * IRQ-safe.
 */
#define raw_percpu_xchg_op(var, nval) \
({ \
	typeof(var) pxo_ret__ = raw_cpu_read(var); \
	raw_cpu_write(var, (nval)); \
	pxo_ret__; \
})

#define raw_cpu_xchg_1(pcp, val)	raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)	raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)	raw_percpu_xchg_op(pcp, val)
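/*
 * Illustrative sketch (not part of the original header): the raw_ variants
 * assume the caller already keeps the task on this CPU, so a plain
 * load + store is enough (hypothetical variable and pointer):
 *
 *	DEFINE_PER_CPU(void *, example_slot);
 *
 *	preempt_disable();
 *	old = raw_cpu_xchg(example_slot, newp);	// load-store, not IRQ-safe
 *	preempt_enable();
 */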

#define this_cpu_read_1(pcp)		percpu_from_op(1, volatile, "mov", pcp)
#define this_cpu_read_2(pcp)		percpu_from_op(2, volatile, "mov", pcp)
#define this_cpu_read_4(pcp)		percpu_from_op(4, volatile, "mov", pcp)
#define this_cpu_write_1(pcp, val)	percpu_to_op(1, volatile, "mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op(2, volatile, "mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op(4, volatile, "mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(1, volatile, pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(2, volatile, pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(4, volatile, pcp, nval)

#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, , pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, volatile, pcp, oval, nval)

#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
	bool __ret; \
	typeof(pcp1) __o1 = (o1), __n1 = (n1); \
	typeof(pcp2) __o2 = (o2), __n2 = (n2); \
	asm volatile("cmpxchg8b "__percpu_arg(1) \
		     CC_SET(z) \
		     : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \
		     : "b" (__n1), "c" (__n2)); \
	__ret; \
})

#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */
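/*
 * Illustrative sketch (not part of the original header): the _double
 * variants update two adjacent per-cpu words as one unit and return true
 * on success. The two members must be contiguous and double-word aligned
 * (hypothetical structure):
 *
 *	struct example_pair {
 *		unsigned long	lo;
 *		unsigned long	hi;
 *	};
 *	DEFINE_PER_CPU_ALIGNED(struct example_pair, example_pair);
 *
 *	ok = this_cpu_cmpxchg_double(example_pair.lo, example_pair.hi,
 *				     old_lo, old_hi, new_lo, new_hi);
 */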

/*
 * Per-CPU atomic 64-bit operations are only available on 64-bit kernels.
 * 32-bit kernels must fall back to the generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)			percpu_from_op(8, , "mov", pcp)
#define raw_cpu_write_8(pcp, val)		percpu_to_op(8, , "mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)			percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val)			percpu_to_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val)			percpu_to_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval)		raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, , pcp, oval, nval)

#define this_cpu_read_8(pcp)			percpu_from_op(8, volatile, "mov", pcp)
#define this_cpu_write_8(pcp, val)		percpu_to_op(8, volatile, "mov", (pcp), val)
#define this_cpu_add_8(pcp, val)		percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val)		percpu_to_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val)			percpu_to_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(8, volatile, pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

/*
 * Pretty complex macro to generate the cmpxchg16b instruction. The
 * instruction is not supported on early AMD64 processors, so we must be
 * able to emulate it in software. The address used in the cmpxchg16b
 * instruction must be aligned to a 16 byte boundary.
 */
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2) \
({ \
	bool __ret; \
	typeof(pcp1) __o1 = (o1), __n1 = (n1); \
	typeof(pcp2) __o2 = (o2), __n2 = (n2); \
	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \
		       X86_FEATURE_CX16, \
		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1), \
				   "+m" (pcp2), "+d" (__o2)), \
		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \
	__ret; \
})

#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
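/*
 * Illustrative sketch (not part of the original header): a well-known user
 * of the 16-byte form is the SLUB allocator, which pairs a freelist
 * pointer with a transaction id in one per-cpu structure, roughly:
 *
 *	this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				old_freelist, old_tid,
 *				new_freelist, new_tid);
 *
 * The names above only indicate that usage; see mm/slub.c for the real code.
 */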

#endif

static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
			const unsigned long __percpu *addr)
{
	unsigned long __percpu *a =
		(unsigned long __percpu *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline bool x86_this_cpu_variable_test_bit(int nr,
			const unsigned long __percpu *addr)
{
	bool oldbit;

	asm volatile("btl "__percpu_arg(2)",%1" \
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long __percpu *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr) \
	(__builtin_constant_p((nr)) \
	 ? x86_this_cpu_constant_test_bit((nr), (addr)) \
	 : x86_this_cpu_variable_test_bit((nr), (addr)))
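/*
 * Illustrative sketch (not part of the original header): testing a bit in
 * a per-cpu word without disabling preemption (hypothetical variable):
 *
 *	DEFINE_PER_CPU(unsigned long, example_flags);
 *
 *	if (x86_this_cpu_test_bit(3, &example_flags))
 *		;	// constant nr: a single mov plus a mask test
 *	if (x86_this_cpu_test_bit(nr, &example_flags))
 *		;	// variable nr: a %gs-based btl
 */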


#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu areas
 * are allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
	DEFINE_PER_CPU(_type, _name) = _initvalue; \
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
		{ [0 ... NR_CPUS-1] = _initvalue }; \
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
		{ [0 ... NR_CPUS-1] = _initvalue }; \
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name) \
	DECLARE_PER_CPU(_type, _name); \
	extern __typeof__(_type) *_name##_early_ptr; \
	extern __typeof__(_type) _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \
	extern __typeof__(_type) *_name##_early_ptr; \
	extern __typeof__(_type) _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
	*(early_per_cpu_ptr(_name) ? \
		&early_per_cpu_ptr(_name)[_cpu] : \
		&per_cpu(_name, _cpu))
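/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * early per-cpu variable and how it is read both before and after the
 * per-cpu areas exist:
 *
 *	DEFINE_EARLY_PER_CPU(u16, example_node, 0);
 *
 *	u16 node = early_per_cpu(example_node, cpu);
 *	// reads example_node_early_map[cpu] while example_node_early_ptr is
 *	// still set; once that pointer is cleared during per-cpu setup, the
 *	// same expression becomes per_cpu(example_node, cpu)
 */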

#else /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name) \
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */