/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files.  Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations.  It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */

#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"

#endif

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS						\
	__section(".discard") __attribute__((unused))

/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition places the following two extra restrictions on
 * defining percpu variables:
 *
 * 1. The symbol must be globally unique, even for static variables.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should set
 * CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU when necessary.
 *
 * To ensure that the generic code observes the above two restrictions,
 * the weak definition is used for all cases if
 * CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set.
 */
#if (defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)) || \
	defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that a hidden weak symbol collision, which would cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
	__PCPU_ATTRS(sec) __weak __typeof__(type) name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) __typeof__(type) name
#endif

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")
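
/*
 * Illustrative sketch (hypothetical names, not part of this header): the
 * usual pattern is to publish the declaration in a header and put the
 * definition in exactly one translation unit:
 *
 *	// foo.h
 *	DECLARE_PER_CPU(int, foo_count);
 *
 *	// foo.c
 *	DEFINE_PER_CPU(int, foo_count);
 *
 * As noted above, the DECLARE and DEFINE variants used for a given
 * variable must place it in the same section.
 */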

/*
 * Declaration/definition used for per-CPU variables that are frequently
 * accessed and should be in a single cacheline.
 *
 * For use only by architecture and core code.  Only use scalar or pointer
 * types to maximize density.
 */
#define DECLARE_PER_CPU_CACHE_HOT(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..hot.." #name)

#define DEFINE_PER_CPU_CACHE_HOT(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..hot.." #name)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned
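
/*
 * Illustrative sketch (hypothetical names): the statistics case described
 * above could look like
 *
 *	struct foo_stats {
 *		unsigned long	rx;
 *		unsigned long	tx;
 *	};
 *	DEFINE_PER_CPU_SHARED_ALIGNED(struct foo_stats, foo_stats);
 *
 * Each CPU updates only its own instance (e.g. with this_cpu_inc()), while
 * a reader collates all instances via per_cpu_ptr()/per_cpu(), defined
 * further below.
 */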

/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

/*
 * Declaration/definition used for per-CPU variables that are read-mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
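
/*
 * Illustrative sketch (hypothetical name): data that hot paths read on
 * every CPU but that rarely changes, such as a cached ops pointer, fits
 * here:
 *
 *	DEFINE_PER_CPU_READ_MOSTLY(const struct foo_ops *, foo_ops);
 *
 * Grouping such variables in the ..read_mostly percpu section keeps them
 * away from frequently written percpu data and limits false sharing.
 */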

/*
 * Declaration/definition used for per-CPU variables that should be accessed
 * as decrypted when memory encryption is enabled in the guest.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define DECLARE_PER_CPU_DECRYPTED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..decrypted")

#define DEFINE_PER_CPU_DECRYPTED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..decrypted")
#else
#define DEFINE_PER_CPU_DECRYPTED(type, name)	DEFINE_PER_CPU(type, name)
#endif

/*
 * Intermodule exports for per-CPU variables.  sparse forgets about the
 * address space across EXPORT_SYMBOL(), so make EXPORT_SYMBOL() a no-op
 * when __CHECKER__ is defined.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif
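
/*
 * Illustrative sketch (hypothetical name): a per-CPU variable that modules
 * need to access is exported next to its definition:
 *
 *	DEFINE_PER_CPU(int, foo_count);
 *	EXPORT_PER_CPU_SYMBOL_GPL(foo_count);
 */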

/*
 * Accessors and operations.
 */
#ifndef __ASSEMBLY__

/*
 * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating
 * @ptr and is invoked once before a percpu area is accessed by all
 * accessors and operations.  This is performed in the generic part of
 * percpu and arch overrides don't need to worry about it; however, if an
 * arch wants to implement an arch-specific percpu accessor or operation,
 * it may use __verify_pcpu_ptr() to verify the parameters.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)
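
/*
 * Illustrative sketch (hypothetical macro, not part of any arch): an
 * arch-specific accessor would typically verify its parameter first and
 * only then perform the access:
 *
 *	#define my_arch_cpu_read(ptr)					\
 *	({								\
 *		__verify_pcpu_ptr(ptr);					\
 *		READ_ONCE(*raw_cpu_ptr(ptr));				\
 *	})
 */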

#define PERCPU_PTR(__p)							\
	(TYPEOF_UNQUAL(*(__p)) __force __kernel *)((__force unsigned long)(__p))

#ifdef CONFIG_SMP

/*
 * Add an offset to a pointer.  Use RELOC_HIDE() to prevent the compiler
 * from making incorrect assumptions about the pointer value.
 */
#define SHIFT_PERCPU_PTR(__p, __offset)					\
	RELOC_HIDE(PERCPU_PTR(__p), (__offset))

#define per_cpu_ptr(ptr, cpu)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
})

#define raw_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	arch_raw_cpu_ptr(ptr);						\
})

#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR(ptr, my_cpu_offset);				\
})
#else
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
#endif

#else	/* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu)						\
({									\
	(void)(cpu);							\
	__verify_pcpu_ptr(ptr);						\
	PERCPU_PTR(ptr);						\
})

#define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)

#endif	/* CONFIG_SMP */

#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
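
/*
 * Illustrative sketch (hypothetical names from the examples above):
 * collating the per-CPU statistics, e.g. from a procfs read handler:
 *
 *	unsigned long total_rx = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		total_rx += per_cpu(foo_stats.rx, cpu);
 *
 * per_cpu() yields an lvalue, so it can also be written to, e.g.
 * per_cpu(foo_count, cpu) = 0 from a CPU hotplug callback.
 */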

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't one.
 */
#define get_cpu_var(var)						\
(*({									\
	preempt_disable();						\
	this_cpu_ptr(&var);						\
}))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var)						\
do {									\
	(void)&(var);							\
	preempt_enable();						\
} while (0)

#define get_cpu_ptr(var)						\
({									\
	preempt_disable();						\
	this_cpu_ptr(var);						\
})

#define put_cpu_ptr(var)						\
do {									\
	(void)(var);							\
	preempt_enable();						\
} while (0)
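
/*
 * Illustrative sketch (hypothetical names): get_cpu_var()/put_cpu_var()
 * bracket a short update of this CPU's instance with preemption disabled:
 *
 *	get_cpu_var(foo_stats).rx++;
 *	put_cpu_var(foo_stats);
 *
 * or, when a pointer is more convenient:
 *
 *	struct foo_stats *stats = get_cpu_ptr(&foo_stats);
 *
 *	stats->rx++;
 *	stats->tx++;
 *	put_cpu_ptr(&foo_stats);
 */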

/*
 * Size-based dispatch: split an operation into a set of functions/macros
 * that are called depending on the scalar size of the object handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static __always_inline void __this_cpu_preempt_check(const char *op) { }
#endif

#define __pcpu_size_call_return(stem, variable)				\
({									\
	TYPEOF_UNQUAL(variable) pscr_ret__;				\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	TYPEOF_UNQUAL(variable) pscr2_ret__;				\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

#define __pcpu_size_call_return2bool(stem, variable, ...)		\
({									\
	bool pscr2_ret__;						\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)
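
/*
 * Informal example of the dispatch (hypothetical variable from the
 * examples above): for a 4-byte percpu variable,
 *
 *	raw_cpu_add(foo_count, 1);
 *
 * expands through __pcpu_size_call(raw_cpu_add_, foo_count, 1) to
 * raw_cpu_add_4(foo_count, 1), which is either an arch-provided macro or
 * the generic fallback.
 */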

/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@gentwo.org>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access with respect to other
 * operations on the *same* processor.  The assumption is that per cpu data
 * is only accessed by a single processor instance (the current one).
 *
 * The arch code can provide optimized implementations by defining macros
 * for certain scalar sizes, e.g. this_cpu_add_2() to provide per cpu
 * atomic operations for 2 byte sized RMW actions.  If arch code does not
 * provide operations for a scalar size then the fallback in the generic
 * code will be used.
 *
 * cmpxchg_double replaces two adjacent scalars at once.  The first two
 * parameters are per cpu variables which have to be of the same size.  A
 * truth value is returned to indicate success or failure (since a double
 * register result is difficult to handle).  There is very limited hardware
 * support for these operations, so only certain sizes may work.
 */

/*
 * Operations for contexts where we do not want to do any checks for
 * preemption.  Unless strictly necessary, always use [__]this_cpu_*()
 * instead.
 *
 * If there is no other protection through preempt disable and/or disabling
 * interrupts then one of these RMW operations can show unexpected behavior
 * because the execution thread was rescheduled to another processor or an
 * interrupt occurred and the same percpu variable was modified from the
 * interrupt context.
 */
#define raw_cpu_read(pcp)		__pcpu_size_call_return(raw_cpu_read_, pcp)
#define raw_cpu_write(pcp, val)		__pcpu_size_call(raw_cpu_write_, pcp, val)
#define raw_cpu_add(pcp, val)		__pcpu_size_call(raw_cpu_add_, pcp, val)
#define raw_cpu_and(pcp, val)		__pcpu_size_call(raw_cpu_and_, pcp, val)
#define raw_cpu_or(pcp, val)		__pcpu_size_call(raw_cpu_or_, pcp, val)
#define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#define raw_cpu_try_cmpxchg(pcp, ovalp, nval) \
	__pcpu_size_call_return2bool(raw_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define raw_cpu_sub(pcp, val)		raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp)		raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp)		raw_cpu_sub(pcp, 1)
#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)		raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)		raw_cpu_add_return(pcp, -1)
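
/*
 * Illustrative sketch (hypothetical name): raw_cpu_*() is only safe when
 * the caller already provides protection, e.g. a counter touched solely
 * with interrupts disabled:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	raw_cpu_inc(foo_count);
 *	local_irq_restore(flags);
 *
 * Without such protection, use this_cpu_inc() instead.
 */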

/*
 * Operations for contexts that are safe from preemption/interrupts.  These
 * operations verify that preemption is disabled.
 */
#define __this_cpu_read(pcp)						\
({									\
	__this_cpu_preempt_check("read");				\
	raw_cpu_read(pcp);						\
})

#define __this_cpu_write(pcp, val)					\
({									\
	__this_cpu_preempt_check("write");				\
	raw_cpu_write(pcp, val);					\
})

#define __this_cpu_add(pcp, val)					\
({									\
	__this_cpu_preempt_check("add");				\
	raw_cpu_add(pcp, val);						\
})

#define __this_cpu_and(pcp, val)					\
({									\
	__this_cpu_preempt_check("and");				\
	raw_cpu_and(pcp, val);						\
})

#define __this_cpu_or(pcp, val)						\
({									\
	__this_cpu_preempt_check("or");					\
	raw_cpu_or(pcp, val);						\
})

#define __this_cpu_add_return(pcp, val)					\
({									\
	__this_cpu_preempt_check("add_return");				\
	raw_cpu_add_return(pcp, val);					\
})

#define __this_cpu_xchg(pcp, nval)					\
({									\
	__this_cpu_preempt_check("xchg");				\
	raw_cpu_xchg(pcp, nval);					\
})

#define __this_cpu_cmpxchg(pcp, oval, nval)				\
({									\
	__this_cpu_preempt_check("cmpxchg");				\
	raw_cpu_cmpxchg(pcp, oval, nval);				\
})

#define __this_cpu_try_cmpxchg(pcp, ovalp, nval)			\
({									\
	__this_cpu_preempt_check("try_cmpxchg");			\
	raw_cpu_try_cmpxchg(pcp, ovalp, nval);				\
})

#define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

/*
 * Operations with implied preemption/interrupt protection.  These
 * operations can be used without worrying about preemption or interrupts.
 */
#define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
#define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, pcp, val)
#define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, pcp, val)
#define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, pcp, val)
#define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#define this_cpu_try_cmpxchg(pcp, ovalp, nval) \
	__pcpu_size_call_return2bool(this_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define this_cpu_sub(pcp, val)		this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp)		this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp)		this_cpu_sub(pcp, 1)
#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
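
/*
 * Illustrative sketch (hypothetical names): this_cpu_*() can be used from
 * any context without explicit preemption or interrupt protection:
 *
 *	this_cpu_inc(foo_stats.rx);
 *
 * A local read-modify-write that must not lose a concurrent update from
 * interrupt context can use the try_cmpxchg form:
 *
 *	int old, newval;
 *
 *	old = this_cpu_read(foo_count);
 *	do {
 *		newval = old < FOO_MAX ? old + 1 : old;
 *	} while (!this_cpu_try_cmpxchg(foo_count, &old, newval));
 */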

#endif /* __ASSEMBLY__ */
#endif /* _LINUX_PERCPU_DEFS_H */