/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};
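
/*
 * Worked example for the bitfield layout above (illustrative): the three
 * fields pack into one 32-bit word, 2 + 6 + 24 = 32.  The 6-bit depth
 * caps a chain at 63 held locks and the 24-bit base can index 2^24
 * chain_hlocks entries; add_chain_cache() enforces both limits with
 * BUILD_BUG_ON()s against the actual array sizes.
 */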

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)
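
/*
 * Minimal usage sketch (hypothetical caller): the counter nests, so
 * lockdep_off()/lockdep_on() calls must be balanced:
 *
 *	lockdep_off();
 *	do_something_lockdep_cannot_model();
 *	lockdep_on();
 */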

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}
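
/*
 * Initialization sketch (hypothetical "struct my_lock"): a custom
 * primitive embeds a lockdep_map and registers it against a static key,
 * much like the stock variants do internally.  All instances
 * initialized with the same key share one lock class:
 *
 *	struct my_lock {
 *		arch_spinlock_t		raw;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	static struct lock_class_key my_lock_key;
 *
 *	static inline void my_lock_init(struct my_lock *l)
 *	{
 *		lockdep_init_map(&l->dep_map, "my_lock", &my_lock_key, 0);
 *	}
 */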

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\
			      (lock)->dep_map.wait_type_inner,		\
			      (lock)->dep_map.wait_type_outer,		\
			      (lock)->dep_map.lock_type)
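
/*
 * Class-split sketch (hypothetical bucket lock): a lock initialized by
 * common code but used under different rules can be re-keyed after
 * init so independent uses get independent classes:
 *
 *	static struct lock_class_key bucket_lock_key;
 *
 *	spin_lock_init(&bucket->lock);
 *	lockdep_set_class(&bucket->lock, &bucket_lock_key);
 */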

/**
 * lockdep_set_novalidate_class: disable checking of lock ordering on a given
 * lock
 * @lock: Lock to mark
 *
 * Lockdep will still record that this lock has been taken, and print held
 * instances when dumping locks.
 */
#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)

/**
 * lockdep_set_notrack_class: disable lockdep tracking of a given lock entirely
 * @lock: Lock to mark
 *
 * Bigger hammer than lockdep_set_novalidate_class: so far just for bcachefs,
 * which takes more locks than lockdep is able to track (48).
 */
#define lockdep_set_notrack_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_track__, #lock)

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
		      int read, int check, struct lockdep_map *nest_lock,
		      unsigned long ip);
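
/*
 * Annotation sketch (hypothetical primitive, continuing "struct
 * my_lock" above): an exclusive acquire with full validation
 * (read=0, check=1) paired with the matching release:
 *
 *	static void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_spin_lock(&l->raw);
 *	}
 *
 *	static void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		arch_spin_unlock(&l->raw);
 *	}
 */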

/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

#define lock_set_novalidate_class(l, n, i) \
	lock_set_class(l, n, &__lockdep_no_validate__, 0, i)

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
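
/*
 * Re-annotation sketch (hypothetical, modeled on the scheduler's
 * double-runqueue locking): take the second lock of a class with a
 * nesting subclass, and once the outer lock is dropped, reset the
 * survivor back to subclass 0:
 *
 *	my_lock(a);
 *	my_lock_nested(b, SINGLE_DEPTH_NESTING);
 *	my_unlock(a);
 *	lock_set_subclass(&b->dep_map, 0, _RET_IP_);
 */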

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)		\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()		\
	lockdep_assert_once(!current->lockdep_depth)
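
/*
 * Usage sketch (hypothetical helper): assert a locking precondition
 * instead of merely commenting "caller must hold f->lock"; the check
 * compiles away entirely when lockdep is disabled:
 *
 *	static void update_stats(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		f->nr_updates++;
 *	}
 */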

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
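
/*
 * Pinning sketch (hypothetical): pin a lock across a region where it
 * must not be dropped, even temporarily; unpinning requires the cookie
 * handed out by the pin:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&q->lock);
 *	...			dropping q->lock here would warn
 *	lockdep_unpin_lock(&q->lock, cookie);
 */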

/*
 * Must use lock_map_acquire_try() with override maps to avoid
 * lockdep thinking they participate in the block chain.
 */
#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map _name = {			\
		.name = #_name "-wait-type-override",	\
		.wait_type_inner = _wait_type,		\
		.lock_type = LD_LOCK_WAIT_OVERRIDE, }
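
/*
 * Override sketch (hypothetical): temporarily raise the wait type
 * permitted in the current context, using the _try acquire as the
 * comment above requires:
 *
 *	DEFINE_WAIT_OVERRIDE_MAP(my_override_map, LD_WAIT_SLEEP);
 *
 *	lock_map_acquire_try(&my_override_map);
 *	...
 *	lock_map_release(&my_override_map);
 */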

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, key, s, i)	do { (void)(key); } while (0)
# define lock_set_novalidate_class(l, n, i)	do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)
#define lockdep_set_notrack_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should rather #ifdef the call themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()	do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map __maybe_unused _name = {}

#endif /* !CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);

#define lock_set_cmp_fn(lock, ...)	lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
#else
#define lock_set_cmp_fn(lock, ...)	do { } while (0)
#endif
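
/*
 * Ordering sketch (hypothetical, in the style of bcachefs' btree node
 * locks): same-class locks can be ordered by a comparison function
 * rather than by subclasses; nesting is then allowed when the function
 * orders the new lock after the held one, and the print function is
 * used to describe the locks in reports:
 *
 *	static int my_node_cmp_fn(const struct lockdep_map *a,
 *				  const struct lockdep_map *b);
 *	static void my_node_print_fn(const struct lockdep_map *map);
 *
 *	lock_set_cmp_fn(&node->lock, my_node_cmp_fn, my_node_print_fn);
 */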

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
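
/*
 * Static-init sketch: a common idiom keys the map to its own address
 * (names here are hypothetical):
 *
 *	static struct lockdep_map my_dep_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_dep_map", &my_dep_map);
 */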

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
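
/*
 * Contention-accounting sketch (hypothetical primitive): wrapping the
 * slowpath in LOCK_CONTENDED() records a contention event only when the
 * trylock fails, and an acquired event either way.  my_trylock() must
 * return nonzero on success and my_lock_slowpath() must acquire
 * unconditionally, mirroring how mutexes and rwsems use this macro:
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		LOCK_CONTENDED(l, my_trylock, my_lock_slowpath);
 *	}
 */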

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_try(l)			lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
#define lock_map_sync(l)			lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)
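
/*
 * Pseudo-lock sketch (hypothetical map): lock_map_acquire()/release()
 * also annotate dependencies that are not locks at all, e.g. "flushing
 * waits on the work running", in the style of the workqueue flush
 * annotations:
 *
 *	static struct lockdep_map my_work_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_work", &my_work_map);
 *
 *	run side:	lock_map_acquire(&my_work_map);
 *			work->fn(work);
 *			lock_map_release(&my_work_map);
 *
 *	flush side:	lock_map_acquire(&my_work_map);
 *			lock_map_release(&my_work_map);
 *			wait_for_completion(&done);
 */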

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
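
/*
 * Usage sketch (hypothetical): might_lock() documents that a function
 * may take a lock on some path, so the dependency is recorded even on
 * runs that never reach the locked path:
 *
 *	void *my_get(struct foo *f, bool can_block)
 *	{
 *		if (can_block)
 *			might_lock(&f->mutex);
 *		if (mutex_trylock(&f->mutex))
 *			return do_get(f);
 *		return can_block ? slow_get(f) : NULL;
 *	}
 */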

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_no_hardirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
					   !this_cpu_read(hardirqs_enabled))); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled			&&		\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

extern void lockdep_assert_in_softirq_func(void);

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
# define lockdep_assert_no_hardirq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
# define lockdep_assert_in_softirq_func() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */