/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/alloc_tag.h>
#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask_types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>

/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum work_bits {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT,		/* data points to pwq */
	WORK_STRUCT_LINKED_BIT,		/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT,		/* static initializer (debugobjects) */
#endif
	WORK_STRUCT_FLAG_BITS,

	/* color for workqueue flushing */
	WORK_STRUCT_COLOR_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_STRUCT_COLOR_BITS	= 4,

	/*
	 * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
	 * debugobjects turned off. This makes pwqs aligned to 256 bytes (512
	 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
	 *
	 * MSB
	 * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
	 *                     4 bits        4 or 5 bits
	 */
	WORK_STRUCT_PWQ_SHIFT	= WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,

	/*
	 * data contains off-queue information when !WORK_STRUCT_PWQ.
	 *
	 * MSB
	 * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
	 *                  16 bits          1 bit        4 or 5 bits
	 */
	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_OFFQ_BH_BIT	= WORK_OFFQ_FLAG_SHIFT,
	WORK_OFFQ_FLAG_END,
	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,

	WORK_OFFQ_DISABLE_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_DISABLE_BITS	= 16,

	/*
	 * When a work item is off queue, the high bits encode off-queue flags
	 * and the last pool it was on. Cap pool ID to 31 bits and use the
	 * highest number to indicate that no pool is associated.
	 */
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};

enum work_flags {
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
};

enum wq_misc_consts {
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 32,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_BH		(1ul << WORK_OFFQ_BH_BIT)
#define WORK_OFFQ_FLAG_MASK	(((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
#define WORK_OFFQ_DISABLE_MASK	(((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK	(~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

enum wq_affn_scope {
	WQ_AFFN_DFL,			/* use system default */
	WQ_AFFN_CPU,			/* one pod per CPU */
	WQ_AFFN_SMT,			/* one pod per SMT */
	WQ_AFFN_CACHE,			/* one pod per LLC */
	WQ_AFFN_NUMA,			/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,			/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 *
	 * Work items in this workqueue are affine to these CPUs and not allowed
	 * to execute on other CPUs. A pool serving a workqueue must have the
	 * same @cpumask.
	 */
	cpumask_var_t cpumask;

	/**
	 * @__pod_cpumask: internal attribute used to create per-pod pools
	 *
	 * Internal use only.
	 *
	 * Per-pod unbound worker pools are used to improve locality. Always a
	 * subset of ->cpumask. A workqueue can be associated with multiple
	 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
	 * of a pool's @__pod_cpumask is strict depends on @affn_strict.
	 */
	cpumask_var_t __pod_cpumask;

	/**
	 * @affn_strict: affinity scope is strict
	 *
	 * If clear, workqueue will make a best-effort attempt at starting the
	 * worker inside @__pod_cpumask but the scheduler is free to migrate it
	 * outside.
	 *
	 * If set, workers are only allowed to run inside @__pod_cpumask.
	 */
	bool affn_strict;

	/*
	 * Below fields aren't properties of a worker_pool. They only modify how
	 * :c:func:`apply_workqueue_attrs` selects pools and thus don't
	 * participate in pool hash calculations or equality comparisons.
	 *
	 * If @affn_strict is set, @cpumask isn't a property of a worker_pool
	 * either.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items. There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type. CPUs
	 * that belong to the same pod share the worker pool. For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};

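/*
 * Example (illustrative sketch only, not part of this header): adjusting an
 * unbound workqueue's attributes with the helpers declared further below
 * (alloc_workqueue_attrs(), apply_workqueue_attrs(), free_workqueue_attrs()).
 * "my_wq" is a made-up workqueue pointer.
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *
 *	if (attrs) {
 *		attrs->nice = -10;
 *		apply_workqueue_attrs(my_wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */
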
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

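/*
 * Example (illustrative, names are made up): a statically initialized work
 * item paired with its handler. Such an item can be queued right away, e.g.
 * with schedule_work() declared later in this header.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	...
 *	schedule_work(&my_work);
 */
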
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static __maybe_unused struct lock_class_key __key;	\
									\
		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
	} while (0)

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
	__INIT_WORK_KEY((_work), (_func), 1, _key)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__timer_init(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__timer_init_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

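/*
 * Example (illustrative, names are made up): runtime initialization of work
 * items embedded in a dynamically allocated object.
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	INIT_WORK(&dev->irq_work, my_irq_work_fn);
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_work_fn);
 */
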
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum wq_flags {
	WQ_BH			= 1 << 0, /* execute in bottom half (softirq) context */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param is
	 * specified.  Per-cpu workqueues identified as contributing
	 * significantly to power consumption are marked with this flag, and
	 * enabling the power_efficient mode leads to noticeable power saving
	 * at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,
	WQ_PERCPU		= 1 << 8, /* bound to a specific cpu */

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */

	/* BH wq only allows the following flags */
	__WQ_BH_ALLOWS		= WQ_BH | WQ_HIGHPRI | WQ_PERCPU,
};

enum wq_consts {
	WQ_MAX_ACTIVE		= 2048,	  /* I like 2048, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,

	/*
	 * Per-node default cap on min_active. Unless explicitly set, min_active
	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
	 * workqueue_struct->min_active definition.
	 */
	WQ_DFL_MIN_ACTIVE	= 8,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_percpu_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_percpu_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_percpu_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_dfl_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_percpu_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_percpu_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 *
 * system_bh[_highpri]_wq are a convenience interface to softirq. BH work items
 * are executed in the queueing CPU's BH context in the queueing order.
 */
extern struct workqueue_struct *system_wq; /* use system_percpu_wq, this will be removed */
extern struct workqueue_struct *system_percpu_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_dfl_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;

void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @...: args for @fmt
 *
 * For a per-cpu workqueue, @max_active limits the number of in-flight work
 * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
 * executing at most one work item for the workqueue.
 *
 * For unbound workqueues, @max_active limits the number of in-flight work items
 * for the whole system. e.g. @max_active of 16 indicates that there can be
 * at most 16 work items executing for the workqueue in the whole system.
 *
 * As sharing the same active counter for an unbound workqueue across multiple
 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
 * according to the proportion of the number of online CPUs and enforced
 * independently.
 *
 * Depending on online CPU distribution, a node may end up with per-node
 * max_active which is significantly lower than @max_active, which can lead to
 * deadlocks if the per-node concurrency limit is lower than the maximum number
 * of interdependent work items for the workqueue.
 *
 * To guarantee forward progress regardless of online CPU distribution, the
 * concurrency limit on every node is guaranteed to be equal to or greater than
 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
 * that the sum of per-node max_active's may be larger than @max_active.
 *
 * For detailed information on %WQ_\* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...);
#define alloc_workqueue(...)	alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__))

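/*
 * Example (illustrative, names are made up): allocate a freezable unbound
 * workqueue with the default max_active, check for failure, and queue a
 * work item on it.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 */
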
#ifdef CONFIG_LOCKDEP
/**
 * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @lockdep_map: user-defined lockdep_map
 * @...: args for @fmt
 *
 * Same as alloc_workqueue() but with a user-defined lockdep_map. Useful for
 * workqueues created with the same purpose and to avoid leaking a lockdep_map
 * on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 5) struct workqueue_struct *
alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
			    struct lockdep_map *lockdep_map, ...);

/**
 * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
 * user-defined lockdep_map
 *
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @lockdep_map: user-defined lockdep_map
 * @args: args for @fmt
 *
 * Same as alloc_ordered_workqueue() but with a user-defined lockdep_map.
 * Useful for workqueues created with the same purpose and to avoid leaking a
 * lockdep_map on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
	alloc_hooks(alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),\
						1, lockdep_map, ##args))
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_PERCPU, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

#define from_work(var, callback_work, work_fieldname)	\
	container_of(callback_work, typeof(*var), work_fieldname)

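/*
 * Example (illustrative, names are made up): recovering the containing
 * object inside a work handler with from_work().
 *
 *	struct my_dev {
 *		struct work_struct work;
 *		int state;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = from_work(dev, work, work);
 *
 *		dev->state++;
 *	}
 */
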
extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
#define alloc_workqueue_attrs(...)	alloc_hooks(alloc_workqueue_attrs_noprof(__VA_ARGS__))

void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool disable_work(struct work_struct *work);
extern bool disable_work_sync(struct work_struct *work);
extern bool enable_work(struct work_struct *work);

extern bool disable_delayed_work(struct delayed_work *dwork);
extern bool disable_delayed_work_sync(struct delayed_work *dwork);
extern bool enable_delayed_work(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

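/*
 * Example (illustrative, names are made up): a typical teardown sequence.
 * Cancel pending/running items synchronously before freeing the objects
 * they touch, then destroy the dedicated workqueue.
 *
 *	cancel_work_sync(&dev->irq_work);
 *	cancel_delayed_work_sync(&dev->poll_work);
 *	destroy_workqueue(dev->wq);
 */
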
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern void workqueue_set_min_active(struct workqueue_struct *wq,
				     int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_percpu_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_percpu_wq, work);
}

/**
 * enable_and_queue_work - Enable and queue a work item on a specific workqueue
 * @wq: The target workqueue
 * @work: The work item to be enabled and queued
 *
 * This function combines the operations of enable_work() and queue_work(),
 * providing a convenient way to enable and queue a work item in a single call.
 * It invokes enable_work() on @work and then queues it if the disable depth
 * reached 0. Returns %true if the disable depth reached 0 and @work is queued,
 * and %false otherwise.
 *
 * Note that @work is always queued when disable depth reaches zero. If the
 * desired behavior is queueing only if certain events took place while @work is
 * disabled, the user should implement the necessary state tracking and perform
 * explicit conditional queueing after enable_work().
 */
static inline bool enable_and_queue_work(struct workqueue_struct *wq,
					 struct work_struct *work)
{
	if (enable_work(work)) {
		queue_work(wq, work);
		return true;
	}
	return false;
}

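/*
 * Example (illustrative, names are made up): temporarily fencing off a work
 * item around a critical section. disable_work_sync() waits for any running
 * instance to finish and blocks requeueing until the item is re-enabled.
 *
 *	disable_work_sync(&dev->irq_work);
 *	... reconfigure hardware; irq_work cannot run or be queued here ...
 *	enable_and_queue_work(dev->wq, &dev->irq_work);
 */
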
/*
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible. Warn about attempts to flush system-wide workqueues at runtime.
 *
 * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_percpu_wq);				\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_percpu_wq) &&		\
	     _wq == system_percpu_wq) ||				\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_dfl_wq) &&		\
	     _wq == system_dfl_wq) ||					\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_percpu_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_percpu_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key);
/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

#endif /* CONFIG_SMP */

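/*
 * Example (illustrative, names are made up): run a function synchronously
 * in a worker bound to a specific CPU and collect its return value.
 *
 *	static long probe_fn(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	long ret = work_on_cpu(cpu, probe_fn, NULL);
 */
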
| 851 |  | 
|---|
| 852 | #ifdef CONFIG_FREEZER | 
|---|
| 853 | extern void freeze_workqueues_begin(void); | 
|---|
| 854 | extern bool freeze_workqueues_busy(void); | 
|---|
| 855 | extern void thaw_workqueues(void); | 
|---|
| 856 | #endif /* CONFIG_FREEZER */ | 
|---|
| 857 |  | 
|---|
| 858 | #ifdef CONFIG_SYSFS | 
|---|
| 859 | int workqueue_sysfs_register(struct workqueue_struct *wq); | 
|---|
| 860 | #else	/* CONFIG_SYSFS */ | 
|---|
| 861 | static inline int workqueue_sysfs_register(struct workqueue_struct *wq) | 
|---|
| 862 | { return 0; } | 
|---|
| 863 | #endif	/* CONFIG_SYSFS */ | 
|---|
| 864 |  | 
|---|
| 865 | #ifdef CONFIG_WQ_WATCHDOG | 
|---|
| 866 | void wq_watchdog_touch(int cpu); | 
|---|
| 867 | #else	/* CONFIG_WQ_WATCHDOG */ | 
|---|
| 868 | static inline void wq_watchdog_touch(int cpu) { } | 
|---|
| 869 | #endif	/* CONFIG_WQ_WATCHDOG */ | 
|---|
| 870 |  | 
|---|
| 871 | #ifdef CONFIG_SMP | 
|---|
| 872 | int workqueue_prepare_cpu(unsigned int cpu); | 
|---|
| 873 | int workqueue_online_cpu(unsigned int cpu); | 
|---|
| 874 | int workqueue_offline_cpu(unsigned int cpu); | 
|---|
| 875 | #endif | 
|---|
| 876 |  | 
|---|
| 877 | void __init workqueue_init_early(void); | 
|---|
| 878 | void __init workqueue_init(void); | 
|---|
| 879 | void __init workqueue_init_topology(void); | 
|---|
| 880 |  | 
|---|
| 881 | #endif | 
|---|
| 882 |  | 
|---|