/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
# define __percpu_seg		gs
# define __percpu_rel		(%rip)
#else
# define __percpu_seg		fs
# define __percpu_rel
#endif

#ifdef __ASSEMBLER__

#ifdef CONFIG_SMP
# define __percpu		%__percpu_seg:
#else
# define __percpu
#endif

#define PER_CPU_VAR(var)	__percpu(var)__percpu_rel
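
/*
 * Illustration only (not part of the interface): assembly code typically
 * references a per-CPU variable through PER_CPU_VAR(), e.g. with a
 * hypothetical per-CPU symbol "demo_var":
 *
 *	movq	PER_CPU_VAR(demo_var), %rax
 *
 * which on a 64-bit SMP build expands to roughly %gs:(demo_var)(%rip);
 * UP builds drop the segment prefix.
 */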

#else /* !__ASSEMBLER__: */

#include <linux/args.h>
#include <linux/build_bug.h>
#include <linux/stringify.h>
#include <asm/asm.h>

#ifdef CONFIG_SMP

#define __force_percpu_prefix	"%%"__stringify(__percpu_seg)":"

#ifdef CONFIG_CC_HAS_NAMED_AS

#ifdef __CHECKER__
# define __seg_gs		__attribute__((address_space(__seg_gs)))
# define __seg_fs		__attribute__((address_space(__seg_fs)))
#endif

#define __percpu_prefix
#define __percpu_seg_override	CONCATENATE(__seg_, __percpu_seg)

#else /* !CONFIG_CC_HAS_NAMED_AS: */

#define __percpu_prefix		__force_percpu_prefix
#define __percpu_seg_override

#endif /* CONFIG_CC_HAS_NAMED_AS */

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define __my_cpu_offset		this_cpu_read(this_cpu_off)
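
/*
 * For illustration only: with the definition above, reading the local CPU's
 * offset is expected to compile to a single segment-relative load, roughly
 *
 *	movq %gs:this_cpu_off(%rip), %rax	(on x86-64)
 *
 * whereas the generic __my_cpu_offset needs an additional instruction and a
 * temporary register. The exact code depends on compiler and configuration.
 */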

/*
 * arch_raw_cpu_ptr should not be used in the 32-bit VDSO for a 64-bit
 * kernel, because games are played with CONFIG_X86_64 there and
 * sizeof(this_cpu_off) becomes 4.
 */
#ifndef BUILD_VDSO32_64
#define arch_raw_cpu_ptr(_ptr)						\
({									\
	unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off);	\
									\
	tcp_ptr__ += (__force unsigned long)(_ptr);			\
	(TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)tcp_ptr__;		\
})
#else
#define arch_raw_cpu_ptr(_ptr)						\
({									\
	BUILD_BUG();							\
	(TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)0;			\
})
#endif
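
/*
 * Usage sketch, purely illustrative (assumes a hypothetical variable defined
 * elsewhere as DEFINE_PER_CPU(int, demo_counter)):
 *
 *	int *p = raw_cpu_ptr(&demo_counter);
 *
 * arch_raw_cpu_ptr() forms the pointer by adding the local CPU's base offset
 * (this_cpu_off, read through the %gs/%fs segment) to the generic per-CPU
 * address of the variable; it does not disable preemption.
 */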

#define PER_CPU_VAR(var)	%__percpu_seg:(var)__percpu_rel

#else /* !CONFIG_SMP: */

#define __force_percpu_prefix
#define __percpu_prefix
#define __percpu_seg_override

#define PER_CPU_VAR(var)	(var)__percpu_rel

#endif /* CONFIG_SMP */

#if defined(CONFIG_USE_X86_SEG_SUPPORT) && defined(USE_TYPEOF_UNQUAL)
# define __my_cpu_type(var)	typeof(var)
# define __my_cpu_ptr(ptr)	(ptr)
# define __my_cpu_var(var)	(var)

# define __percpu_qual		__percpu_seg_override
#else
# define __my_cpu_type(var)	typeof(var) __percpu_seg_override
# define __my_cpu_ptr(ptr)	(__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr)
# define __my_cpu_var(var)	(*__my_cpu_ptr(&(var)))
#endif

#define __force_percpu_arg(x)	__force_percpu_prefix "%" #x
#define __percpu_arg(x)		__percpu_prefix "%" #x

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */

#define __pcpu_type_1		u8
#define __pcpu_type_2		u16
#define __pcpu_type_4		u32
#define __pcpu_type_8		u64

#define __pcpu_cast_1(val)	((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val)	((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val)	((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val)	((u64)(val))

#define __pcpu_op_1(op)		op "b "
#define __pcpu_op_2(op)		op "w "
#define __pcpu_op_4(op)		op "l "
#define __pcpu_op_8(op)		op "q "

#define __pcpu_reg_1(mod, x)	mod "q" (x)
#define __pcpu_reg_2(mod, x)	mod "r" (x)
#define __pcpu_reg_4(mod, x)	mod "r" (x)
#define __pcpu_reg_8(mod, x)	mod "r" (x)

#define __pcpu_reg_imm_1(x)	"qi" (x)
#define __pcpu_reg_imm_2(x)	"ri" (x)
#define __pcpu_reg_imm_4(x)	"ri" (x)
#define __pcpu_reg_imm_8(x)	"re" (x)

#ifdef CONFIG_USE_X86_SEG_SUPPORT

#define __raw_cpu_read(size, qual, pcp)					\
({									\
	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp));		\
})

#define __raw_cpu_write(size, qual, pcp, val)				\
do {									\
	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val);	\
} while (0)

#define __raw_cpu_read_const(pcp)	__raw_cpu_read(, , pcp)

#else /* !CONFIG_USE_X86_SEG_SUPPORT: */

#define __raw_cpu_read(size, qual, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
									\
	asm qual (__pcpu_op_##size("mov")				\
		  __percpu_arg([var]) ", %[val]"			\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "m" (__my_cpu_var(_var)));				\
									\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

#define __raw_cpu_write(size, qual, _var, _val)				\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
									\
	if (0) {							\
		TYPEOF_UNQUAL(_var) pto_tmp__;				\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual (__pcpu_op_##size("mov") "%[val], "			\
		  __percpu_arg([var])					\
	    : [var] "=m" (__my_cpu_var(_var))				\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)

/*
 * The generic per-CPU infrastructure is not suitable for
 * reading const-qualified variables.
 */
#define __raw_cpu_read_const(pcp)	({ BUILD_BUG(); (typeof(pcp))0; })

#endif /* CONFIG_USE_X86_SEG_SUPPORT */

#define __raw_cpu_read_stable(size, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
									\
	asm(__pcpu_op_##size("mov")					\
	    __force_percpu_arg(a[var]) ", %[val]"			\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "i" (&(_var)));					\
									\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

#define percpu_unary_op(size, qual, op, _var)				\
({									\
	asm qual (__pcpu_op_##size(op) __percpu_arg([var])		\
	    : [var] "+m" (__my_cpu_var(_var)));				\
})

#define percpu_binary_op(size, qual, op, _var, _val)			\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
									\
	if (0) {							\
		TYPEOF_UNQUAL(_var) pto_tmp__;				\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual (__pcpu_op_##size(op) "%[val], " __percpu_arg([var])	\
	    : [var] "+m" (__my_cpu_var(_var))				\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)

/*
 * Generate a per-CPU add-to-memory instruction, and optimize the
 * generated code when the value 1 is added or subtracted.
 */
#define percpu_add_op(size, qual, var, val)				\
do {									\
	const int pao_ID__ =						\
		(__builtin_constant_p(val) &&				\
			((val) == 1 ||					\
			 (val) == (typeof(val))-1)) ? (int)(val) : 0;	\
									\
	if (0) {							\
		TYPEOF_UNQUAL(var) pao_tmp__;				\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	if (pao_ID__ == 1)						\
		percpu_unary_op(size, qual, "inc", var);		\
	else if (pao_ID__ == -1)					\
		percpu_unary_op(size, qual, "dec", var);		\
	else								\
		percpu_binary_op(size, qual, "add", var, val);		\
} while (0)
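
/*
 * Expansion sketch, for illustration only (hypothetical per-CPU unsigned
 * long "demo_counter"; exact output depends on compiler and configuration):
 *
 *	this_cpu_inc(demo_counter);	// roughly: incq %gs:demo_counter(%rip)
 *	this_cpu_dec(demo_counter);	// roughly: decq %gs:demo_counter(%rip)
 *	this_cpu_add(demo_counter, 16);	// roughly: addq $16, %gs:demo_counter(%rip)
 *
 * Only constant +1/-1 take the inc/dec forms; everything else uses add.
 */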

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val)			\
({									\
	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val);	\
									\
	asm qual (__pcpu_op_##size("xadd") "%[tmp], "			\
		  __percpu_arg([var])					\
		  : [tmp] __pcpu_reg_##size("+", paro_tmp__),		\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : : "memory");					\
	(typeof(_var))(unsigned long) (paro_tmp__ + _val);		\
})

/*
 * raw_cpu_xchg() can use a load-store since
 * it is not required to be IRQ-safe.
 */
#define raw_percpu_xchg_op(_var, _nval)					\
({									\
	TYPEOF_UNQUAL(_var) pxo_old__ = raw_cpu_read(_var);		\
									\
	raw_cpu_write(_var, _nval);					\
									\
	pxo_old__;							\
})

/*
 * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix.
 * XCHG is expensive due to the implied LOCK prefix. The processor
 * cannot prefetch cachelines if XCHG is used.
 */
#define this_percpu_xchg_op(_var, _nval)				\
({									\
	TYPEOF_UNQUAL(_var) pxo_old__ = this_cpu_read(_var);		\
									\
	do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval));	\
									\
	pxo_old__;							\
})
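
/*
 * The loop above, spelled out as a sketch for clarity (illustration only):
 *
 *	old = this_cpu_read(var);
 *	do {
 *		// try to install 'nval'; on failure 'old' is refreshed with
 *		// the value the CPU actually observed, and we retry
 *	} while (!this_cpu_try_cmpxchg(var, &old, nval));
 *	return old;
 */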

/*
 * CMPXCHG has no such implied LOCK semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)		\
({									\
	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval);	\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
									\
	asm qual (__pcpu_op_##size("cmpxchg") "%[nval], "		\
		  __percpu_arg([var])					\
		  : [oval] "+a" (pco_old__),				\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
									\
	(typeof(_var))(unsigned long) pco_old__;			\
})

#define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval)		\
({									\
	bool success;							\
	__pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \
	__pcpu_type_##size pco_old__ = *pco_oval__;			\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
									\
	asm qual (__pcpu_op_##size("cmpxchg") "%[nval], "		\
		  __percpu_arg([var])					\
		  : "=@ccz" (success),					\
		    [oval] "+a" (pco_old__),				\
		    [var] "+m" (__my_cpu_var(_var))			\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
	if (unlikely(!success))						\
		*pco_oval__ = pco_old__;				\
									\
	likely(success);						\
})
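
/*
 * Typical caller pattern for the try_cmpxchg form, shown only as an
 * illustration (hypothetical per-CPU unsigned long "demo_state" and helper
 * compute_new()):
 *
 *	unsigned long old = this_cpu_read(demo_state), new;
 *
 *	do {
 *		new = compute_new(old);
 *	} while (!this_cpu_try_cmpxchg(demo_state, &old, new));
 *
 * On failure the macro writes the freshly observed value back through the
 * old-value pointer, so the loop does not need an explicit re-read.
 */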

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)

#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm_inline qual (						\
		ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
			    "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		: ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)),	\
				"+a" (old__.low), "+d" (old__.high))	\
		: "b" (new__.low), "c" (new__.high),			\
		  "S" (&(_var))						\
		: "memory");						\
									\
	old__.var;							\
})

#define raw_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg64_op(8,         , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg64_op(size, qual, _var, _ovalp, _nval)	\
({									\
	bool success;							\
	u64 *_oval = (u64 *)(_ovalp);					\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = *_oval;						\
	new__.var = _nval;						\
									\
	asm_inline qual (						\
		ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
			    "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		: ALT_OUTPUT_SP("=@ccz" (success),			\
				[var] "+m" (__my_cpu_var(_var)),	\
				"+a" (old__.low), "+d" (old__.high))	\
		: "b" (new__.low), "c" (new__.high),			\
		  "S" (&(_var))						\
		: "memory");						\
	if (unlikely(!success))						\
		*_oval = old__.var;					\
									\
	likely(success);						\
})

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg64_op(8,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)

#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg_op(8,         , pcp, oval, nval);
#define this_cpu_cmpxchg64(pcp, oval, nval)		percpu_cmpxchg_op(8, volatile, pcp, oval, nval);

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg_op(8,         , pcp, ovalp, nval);
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval);

#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm_inline qual (						\
		ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
			    "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		: ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)),	\
				"+a" (old__.low), "+d" (old__.high))	\
		: "b" (new__.low), "c" (new__.high),			\
		  "S" (&(_var))						\
		: "memory");						\
									\
	old__.var;							\
})

#define raw_cpu_cmpxchg128(pcp, oval, nval)		percpu_cmpxchg128_op(16,         , pcp, oval, nval)
#define this_cpu_cmpxchg128(pcp, oval, nval)		percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval)	\
({									\
	bool success;							\
	u128 *_oval = (u128 *)(_ovalp);					\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = *_oval;						\
	new__.var = _nval;						\
									\
	asm_inline qual (						\
		ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
			    "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		: ALT_OUTPUT_SP("=@ccz" (success),			\
				[var] "+m" (__my_cpu_var(_var)),	\
				"+a" (old__.low), "+d" (old__.high))	\
		: "b" (new__.low), "c" (new__.high),			\
		  "S" (&(_var))						\
		: "memory");						\
	if (unlikely(!success))						\
		*_oval = old__.var;					\
									\
	likely(success);						\
})

#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16,         , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
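
/*
 * Illustration only: the 128-bit forms operate on a u128 pair through
 * CMPXCHG16B, with a call to this_cpu_cmpxchg16b_emu patched in via
 * ALTERNATIVE when X86_FEATURE_CX16 is unavailable. CMPXCHG16B requires the
 * per-CPU object to be 16-byte aligned. A hypothetical caller:
 *
 *	if (!this_cpu_try_cmpxchg128(demo_pair, &old, new))
 *		;	// 'old' now holds the value the CPU actually observed
 */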

#endif /* CONFIG_X86_64 */

#define raw_cpu_read_1(pcp)				__raw_cpu_read(1, , pcp)
#define raw_cpu_read_2(pcp)				__raw_cpu_read(2, , pcp)
#define raw_cpu_read_4(pcp)				__raw_cpu_read(4, , pcp)
#define raw_cpu_write_1(pcp, val)			__raw_cpu_write(1, , pcp, val)
#define raw_cpu_write_2(pcp, val)			__raw_cpu_write(2, , pcp, val)
#define raw_cpu_write_4(pcp, val)			__raw_cpu_write(4, , pcp, val)

#define this_cpu_read_1(pcp)				__raw_cpu_read(1, volatile, pcp)
#define this_cpu_read_2(pcp)				__raw_cpu_read(2, volatile, pcp)
#define this_cpu_read_4(pcp)				__raw_cpu_read(4, volatile, pcp)
#define this_cpu_write_1(pcp, val)			__raw_cpu_write(1, volatile, pcp, val)
#define this_cpu_write_2(pcp, val)			__raw_cpu_write(2, volatile, pcp, val)
#define this_cpu_write_4(pcp, val)			__raw_cpu_write(4, volatile, pcp, val)

#define this_cpu_read_stable_1(pcp)			__raw_cpu_read_stable(1, pcp)
#define this_cpu_read_stable_2(pcp)			__raw_cpu_read_stable(2, pcp)
#define this_cpu_read_stable_4(pcp)			__raw_cpu_read_stable(4, pcp)

#define raw_cpu_add_1(pcp, val)				percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val)				percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val)				percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val)				percpu_binary_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val)				percpu_binary_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val)				percpu_binary_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val)				percpu_binary_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val)				percpu_binary_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val)				percpu_binary_op(4, , "or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val)			raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)			raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)			raw_percpu_xchg_op(pcp, val)

#define this_cpu_add_1(pcp, val)			percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val)			percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val)			percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val)			percpu_binary_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val)			percpu_binary_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val)			percpu_binary_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val)				percpu_binary_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val)				percpu_binary_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val)				percpu_binary_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)			this_percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)			percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val)			percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val)			percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)		percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)		percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)		percpu_cmpxchg_op(4, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval)		percpu_try_cmpxchg_op(1, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval)		percpu_try_cmpxchg_op(2, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval)		percpu_try_cmpxchg_op(4, , pcp, ovalp, nval)

#define this_cpu_add_return_1(pcp, val)			percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val)			percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val)			percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)		percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)		percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)		percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval)	percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval)	percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval)	percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)

/*
 * Per-CPU atomic 64-bit operations are only available under 64-bit kernels.
 * 32-bit kernels must fall back to generic operations.
 */
#ifdef CONFIG_X86_64

#define raw_cpu_read_8(pcp)				__raw_cpu_read(8, , pcp)
#define raw_cpu_write_8(pcp, val)			__raw_cpu_write(8, , pcp, val)

#define this_cpu_read_8(pcp)				__raw_cpu_read(8, volatile, pcp)
#define this_cpu_write_8(pcp, val)			__raw_cpu_write(8, volatile, pcp, val)

#define this_cpu_read_stable_8(pcp)			__raw_cpu_read_stable(8, pcp)

#define raw_cpu_add_8(pcp, val)				percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val)				percpu_binary_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val)				percpu_binary_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)			percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval)			raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)		percpu_cmpxchg_op(8, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval)		percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)

#define this_cpu_add_8(pcp, val)			percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val)			percpu_binary_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val)				percpu_binary_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)			percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval)			this_percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)		percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define raw_cpu_read_long(pcp)				raw_cpu_read_8(pcp)

#else /* !CONFIG_X86_64: */

/* There is no generic 64-bit read stable operation for 32-bit targets. */
#define this_cpu_read_stable_8(pcp)			({ BUILD_BUG(); (typeof(pcp))0; })

#define raw_cpu_read_long(pcp)				raw_cpu_read_4(pcp)

#endif /* CONFIG_X86_64 */

#define this_cpu_read_const(pcp)			__raw_cpu_read_const(pcp)

/*
 * this_cpu_read() makes the compiler load the per-CPU variable every time
 * it is accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across CPUs.  The current users include
 * current_task and cpu_current_top_of_stack, both of which are
 * actually per-thread variables implemented as per-CPU variables and
 * thus stable for the duration of the respective task.
 */
#define this_cpu_read_stable(pcp)			__pcpu_size_call_return(this_cpu_read_stable_, pcp)
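
/*
 * Illustrative contrast (sketch only, hypothetical per-CPU pointer
 * "demo_ptr"):
 *
 *	p = this_cpu_read(demo_ptr);		// re-loaded at every use
 *	q = this_cpu_read_stable(demo_ptr);	// may be cached/hoisted by the
 *						// compiler across preemption
 *						// points
 *
 * The _stable form is therefore only safe for values that read the same on
 * whichever CPU the current task happens to be running.
 */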

#define x86_this_cpu_constant_test_bit(_nr, _var)			\
({									\
	unsigned long __percpu *addr__ =				\
		(unsigned long __percpu *)&(_var) + ((_nr) / BITS_PER_LONG); \
									\
	!!((1UL << ((_nr) % BITS_PER_LONG)) & raw_cpu_read(*addr__));	\
})

#define x86_this_cpu_variable_test_bit(_nr, _var)			\
({									\
	bool oldbit;							\
									\
	asm volatile("btl %[nr], " __percpu_arg([var])			\
		     : "=@ccc" (oldbit)					\
		     : [var] "m" (__my_cpu_var(_var)),			\
		       [nr] "rI" (_nr));				\
	oldbit;								\
})

#define x86_this_cpu_test_bit(_nr, _var)				\
	(__builtin_constant_p(_nr)					\
	 ? x86_this_cpu_constant_test_bit(_nr, _var)			\
	 : x86_this_cpu_variable_test_bit(_nr, _var))
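
/*
 * Sketch of intended use, illustration only (hypothetical per-CPU unsigned
 * long array "demo_flags" and constant DEMO_BIT_NR):
 *
 *	if (x86_this_cpu_test_bit(DEMO_BIT_NR, demo_flags))
 *		...;
 *
 * A compile-time constant bit number becomes a plain load plus mask; a
 * variable bit number falls back to a BTL on the segment-based per-CPU
 * address.
 */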

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_CACHE_HOT(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLER__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)				\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)				\
	DECLARE_PER_CPU(_type, _name);					\
	extern __typeof__(_type) *_name##_early_ptr;			\
	extern __typeof__(_type)  _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)			\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);			\
	extern __typeof__(_type) *_name##_early_ptr;			\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name)			(_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx)			(_name##_early_map[_idx])

#define	early_per_cpu(_name, _cpu)					\
	*(early_per_cpu_ptr(_name) ?					\
		&early_per_cpu_ptr(_name)[_cpu] :			\
		&per_cpu(_name, _cpu))
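
/*
 * Usage sketch, illustration only (hypothetical variable "demo_node"):
 *
 *	DEFINE_EARLY_PER_CPU(u32, demo_node, 0);
 *
 *	u32 n = early_per_cpu(demo_node, cpu);	// valid before and after
 *						// setup_per_cpu_areas()
 *
 * Before the per-CPU areas exist, accesses go through the NR_CPUS-sized
 * _early_map[] array; once they are set up and the _early_ptr has been
 * cleared, the same expression reads the real per_cpu() copy.
 */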

#else /* !CONFIG_SMP: */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)				\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)				\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)			\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu)			per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name)			NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */