/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp_types.h>

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
	struct __call_single_node node;
	smp_call_func_t func;
	void *info;
};

#define CSD_INIT(_func, _info) \
	(struct __call_single_data){ .func = (_func), .info = (_info), }

/* Use __aligned() to avoid using two cache lines for a single csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

#define INIT_CSD(_csd, _func, _info)		\
do {						\
	*(_csd) = CSD_INIT((_func), (_info));	\
} while (0)

/*
 * Enqueue a llist_node on the call_single_queue; be very careful, read
 * flush_smp_call_function_queue() in detail.
 */
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);
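
/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * typical synchronous call on one specific CPU.  collect_stats() and
 * struct my_stats are hypothetical names used only for this example.  The
 * callback runs on the target CPU in interrupt context, so it must be fast
 * and must not sleep; passing wait=1 blocks until it has returned.
 *
 *	static void collect_stats(void *info)
 *	{
 *		struct my_stats *s = info;
 *
 *		s->cpu = smp_processor_id();
 *	}
 *
 *	struct my_stats stats;
 *	int err = smp_call_function_single(1, collect_stats, &stats, 1);
 *	if (err)
 *		pr_warn("cross-CPU call failed: %d\n", err);
 */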

void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);

int smp_call_function_single_async(int cpu, call_single_data_t *csd);
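
/*
 * Illustrative sketch (an assumption, not from this header): an asynchronous
 * cross-CPU call using a statically initialized csd.  poke_remote(), poked
 * and target_cpu are hypothetical names.  The csd must remain valid until
 * the callback has run; a non-zero return from
 * smp_call_function_single_async() indicates the csd is still pending from
 * an earlier call and the new request was not queued.  For runtime
 * initialization, INIT_CSD() can be used instead of CSD_INIT().
 *
 *	static atomic_t poked;
 *
 *	static void poke_remote(void *info)
 *	{
 *		atomic_inc(&poked);
 *	}
 *
 *	static call_single_data_t remote_csd = CSD_INIT(poke_remote, NULL);
 *
 *	if (smp_call_function_single_async(target_cpu, &remote_csd))
 *		pr_warn("csd still pending from an earlier call\n");
 */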

/*
 * CPU-stopping functions used during panic.  All have default weak
 * definitions; architecture-dependent code may override them.
 */
void __noreturn panic_smp_self_stop(void);
void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void);

/*
 * Call a function on all processors
 */
static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
}
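
/*
 * Illustrative sketch (an assumption): bump a hypothetical per-CPU counter
 * on every online CPU, including the local one, and wait for all callbacks
 * to finish.  flush_count and flush_local_cache() are made-up names; the
 * callback runs with IRQs disabled and must not sleep.
 *
 *	static DEFINE_PER_CPU(unsigned long, flush_count);
 *
 *	static void flush_local_cache(void *info)
 *	{
 *		this_cpu_inc(flush_count);
 *	}
 *
 *	on_each_cpu(flush_local_cache, NULL, 1);
 */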

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
static inline void on_each_cpu_mask(const struct cpumask *mask,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
}
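
/*
 * Illustrative sketch (an assumption): run a callback only on the CPUs of
 * one NUMA node.  drain_node_queues(), node_drain_done and nid are made-up
 * names for this example; cpumask_of_node() is the usual topology accessor.
 *
 *	static DEFINE_PER_CPU(bool, node_drain_done);
 *
 *	static void drain_node_queues(void *info)
 *	{
 *		this_cpu_write(node_drain_done, true);
 *	}
 *
 *	on_each_cpu_mask(cpumask_of_node(nid), drain_node_queues, NULL, true);
 */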

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true.  This may include the local processor.  May be
 * used during early boot while early_boot_irqs_disabled is set; in that
 * case use local_irq_save()/restore() rather than
 * local_irq_disable()/enable().
 */
static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
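
/*
 * Illustrative sketch (an assumption): invalidate a hypothetical per-CPU
 * cache only on CPUs that actually hold entries.  cache_entries,
 * cache_is_dirty() and invalidate_cache() are made-up names.
 *
 *	static DEFINE_PER_CPU(unsigned long, cache_entries);
 *
 *	static bool cache_is_dirty(int cpu, void *info)
 *	{
 *		return per_cpu(cache_entries, cpu) != 0;
 *	}
 *
 *	static void invalidate_cache(void *info)
 *	{
 *		this_cpu_write(cache_entries, 0);
 *	}
 *
 *	on_each_cpu_cond(cache_is_dirty, invalidate_cache, NULL, true);
 */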

/*
 * Architecture-specific boot CPU setup.  Defined as an empty weak function
 * in init/main.c; architectures can override it.
 */
void __init smp_prepare_boot_cpu(void);

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void arch_smp_send_reschedule(int cpu);
/*
 * scheduler_ipi() is inline, so it can't be passed as the callback reason;
 * the callsite IP should be sufficient for root-causing IPIs sent from here.
 */
#define smp_send_reschedule(cpu) ({		  \
	trace_ipi_send_cpu(cpu, _RET_IP_, NULL);  \
	arch_smp_send_reschedule(cpu);		  \
})

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);
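
/*
 * Illustrative sketch (an assumption): smp_call_function_any() runs @func on
 * one CPU picked from @mask, preferring the current CPU if it is in the
 * mask, which makes it handy for "run this somewhere on that node" cases.
 * where_am_i(), ran_on and nid are made-up names for this example.
 *
 *	static void where_am_i(void *info)
 *	{
 *		*(int *)info = smp_processor_id();
 *	}
 *
 *	int ran_on;
 *	int err = smp_call_function_any(cpumask_of_node(nid), where_am_i,
 *					&ran_on, 1);
 */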

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single-CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) {  }
static inline void wake_up_all_idle_cpus(void) {  }

#define setup_max_cpus 0

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static __always_inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions and
 * WARN when smp_processor_id() is used while the CPU id is not stable.
 */

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable case
 * but a regular asm read for the stable case.
 */
#ifndef __smp_processor_id
#define __smp_processor_id() raw_smp_processor_id()
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()
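
/*
 * Illustrative sketch (an assumption): get_cpu() disables preemption and
 * returns the current CPU id, keeping it stable until put_cpu() re-enables
 * preemption.  my_counter is a made-up per-CPU variable for this example.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_counter);
 *	int cpu;
 *
 *	cpu = get_cpu();
 *	per_cpu(my_counter, cpu)++;
 *	put_cpu();
 */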

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);
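
/*
 * Illustrative sketch (an assumption): unlike the IPI-based calls above,
 * smp_call_on_cpu() runs @func in process context on @cpu and propagates
 * its return value back to the caller; with @phys set it is additionally
 * pinned to the physical CPU for virtualized environments.
 * set_device_mode(), my_device_write_mode() and dev are hypothetical names.
 *
 *	static int set_device_mode(void *arg)
 *	{
 *		struct my_device *dev = arg;
 *
 *		return my_device_write_mode(dev);
 *	}
 *
 *	int err = smp_call_on_cpu(cpu, set_device_mode, dev, true);
 */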

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
bool csd_lock_is_stuck(void);
#else
static inline bool csd_lock_is_stuck(void) { return false; }
#endif

#endif /* __LINUX_SMP_H */