1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_LOCAL_LOCK_H
3# error "Do not include directly, include linux/local_lock.h"
4#endif
5
6#include <linux/percpu-defs.h>
7#include <linux/lockdep.h>
8
9#ifndef CONFIG_PREEMPT_RT
10
11typedef struct {
12#ifdef CONFIG_DEBUG_LOCK_ALLOC
13 struct lockdep_map dep_map;
14 struct task_struct *owner;
15#endif
16} local_lock_t;
17
18/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
19typedef struct {
20#ifdef CONFIG_DEBUG_LOCK_ALLOC
21 struct lockdep_map dep_map;
22 struct task_struct *owner;
23#endif
24 u8 acquired;
25} local_trylock_t;
26
27#ifdef CONFIG_DEBUG_LOCK_ALLOC
28# define LOCAL_LOCK_DEBUG_INIT(lockname) \
29 .dep_map = { \
30 .name = #lockname, \
31 .wait_type_inner = LD_WAIT_CONFIG, \
32 .lock_type = LD_LOCK_PERCPU, \
33 }, \
34 .owner = NULL,
35
36# define LOCAL_TRYLOCK_DEBUG_INIT(lockname) \
37 LOCAL_LOCK_DEBUG_INIT(lockname)
38
39static inline void local_lock_acquire(local_lock_t *l)
40{
41 lock_map_acquire(&l->dep_map);
42 DEBUG_LOCKS_WARN_ON(l->owner);
43 l->owner = current;
44}
45
46static inline void local_trylock_acquire(local_lock_t *l)
47{
48 lock_map_acquire_try(&l->dep_map);
49 DEBUG_LOCKS_WARN_ON(l->owner);
50 l->owner = current;
51}
52
53static inline void local_lock_release(local_lock_t *l)
54{
55 DEBUG_LOCKS_WARN_ON(l->owner != current);
56 l->owner = NULL;
57 lock_map_release(&l->dep_map);
58}
59
60static inline void local_lock_debug_init(local_lock_t *l)
61{
62 l->owner = NULL;
63}
64#else /* CONFIG_DEBUG_LOCK_ALLOC */
65# define LOCAL_LOCK_DEBUG_INIT(lockname)
66# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
67static inline void local_lock_acquire(local_lock_t *l) { }
68static inline void local_trylock_acquire(local_lock_t *l) { }
69static inline void local_lock_release(local_lock_t *l) { }
70static inline void local_lock_debug_init(local_lock_t *l) { }
71#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
72
73#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
74#define INIT_LOCAL_TRYLOCK(lockname) { LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
75
76#define __local_lock_init(lock) \
77do { \
78 static struct lock_class_key __key; \
79 \
80 debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
81 lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
82 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
83 LD_LOCK_PERCPU); \
84 local_lock_debug_init(lock); \
85} while (0)
86
87#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
88
89#define __spinlock_nested_bh_init(lock) \
90do { \
91 static struct lock_class_key __key; \
92 \
93 debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
94 lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
95 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
96 LD_LOCK_NORMAL); \
97 local_lock_debug_init(lock); \
98} while (0)
99
100#define __local_lock_acquire(lock) \
101 do { \
102 local_trylock_t *tl; \
103 local_lock_t *l; \
104 \
105 l = (local_lock_t *)(lock); \
106 tl = (local_trylock_t *)l; \
107 _Generic((lock), \
108 local_trylock_t *: ({ \
109 lockdep_assert(tl->acquired == 0); \
110 WRITE_ONCE(tl->acquired, 1); \
111 }), \
112 local_lock_t *: (void)0); \
113 local_lock_acquire(l); \
114 } while (0)
115
116#define __local_lock(lock) \
117 do { \
118 preempt_disable(); \
119 __local_lock_acquire(lock); \
120 } while (0)
121
122#define __local_lock_irq(lock) \
123 do { \
124 local_irq_disable(); \
125 __local_lock_acquire(lock); \
126 } while (0)
127
128#define __local_lock_irqsave(lock, flags) \
129 do { \
130 local_irq_save(flags); \
131 __local_lock_acquire(lock); \
132 } while (0)
133
134#define __local_trylock(lock) \
135 ({ \
136 local_trylock_t *tl; \
137 \
138 preempt_disable(); \
139 tl = (lock); \
140 if (READ_ONCE(tl->acquired)) { \
141 preempt_enable(); \
142 tl = NULL; \
143 } else { \
144 WRITE_ONCE(tl->acquired, 1); \
145 local_trylock_acquire( \
146 (local_lock_t *)tl); \
147 } \
148 !!tl; \
149 })
150
151#define __local_trylock_irqsave(lock, flags) \
152 ({ \
153 local_trylock_t *tl; \
154 \
155 local_irq_save(flags); \
156 tl = (lock); \
157 if (READ_ONCE(tl->acquired)) { \
158 local_irq_restore(flags); \
159 tl = NULL; \
160 } else { \
161 WRITE_ONCE(tl->acquired, 1); \
162 local_trylock_acquire( \
163 (local_lock_t *)tl); \
164 } \
165 !!tl; \
166 })
167
168/* preemption or migration must be disabled before calling __local_lock_is_locked */
169#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
170
171#define __local_lock_release(lock) \
172 do { \
173 local_trylock_t *tl; \
174 local_lock_t *l; \
175 \
176 l = (local_lock_t *)(lock); \
177 tl = (local_trylock_t *)l; \
178 local_lock_release(l); \
179 _Generic((lock), \
180 local_trylock_t *: ({ \
181 lockdep_assert(tl->acquired == 1); \
182 WRITE_ONCE(tl->acquired, 0); \
183 }), \
184 local_lock_t *: (void)0); \
185 } while (0)
186
187#define __local_unlock(lock) \
188 do { \
189 __local_lock_release(lock); \
190 preempt_enable(); \
191 } while (0)
192
193#define __local_unlock_irq(lock) \
194 do { \
195 __local_lock_release(lock); \
196 local_irq_enable(); \
197 } while (0)
198
199#define __local_unlock_irqrestore(lock, flags) \
200 do { \
201 __local_lock_release(lock); \
202 local_irq_restore(flags); \
203 } while (0)
204
205#define __local_lock_nested_bh(lock) \
206 do { \
207 lockdep_assert_in_softirq(); \
208 local_lock_acquire((lock)); \
209 } while (0)
210
211#define __local_unlock_nested_bh(lock) \
212 local_lock_release((lock))
213
214#else /* !CONFIG_PREEMPT_RT */
215
216/*
217 * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
218 * critical section while staying preemptible.
219 */
220typedef spinlock_t local_lock_t;
221typedef spinlock_t local_trylock_t;
222
223#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
224#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
225
226#define __local_lock_init(l) \
227 do { \
228 local_spin_lock_init((l)); \
229 } while (0)
230
231#define __local_trylock_init(l) __local_lock_init(l)
232
233#define __local_lock(__lock) \
234 do { \
235 migrate_disable(); \
236 spin_lock((__lock)); \
237 } while (0)
238
239#define __local_lock_irq(lock) __local_lock(lock)
240
241#define __local_lock_irqsave(lock, flags) \
242 do { \
243 typecheck(unsigned long, flags); \
244 flags = 0; \
245 __local_lock(lock); \
246 } while (0)
247
248#define __local_unlock(__lock) \
249 do { \
250 spin_unlock((__lock)); \
251 migrate_enable(); \
252 } while (0)
253
254#define __local_unlock_irq(lock) __local_unlock(lock)
255
256#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
257
258#define __local_lock_nested_bh(lock) \
259do { \
260 lockdep_assert_in_softirq_func(); \
261 spin_lock((lock)); \
262} while (0)
263
264#define __local_unlock_nested_bh(lock) \
265do { \
266 spin_unlock((lock)); \
267} while (0)
268
269#define __local_trylock(lock) \
270 ({ \
271 int __locked; \
272 \
273 if (in_nmi() | in_hardirq()) { \
274 __locked = 0; \
275 } else { \
276 migrate_disable(); \
277 __locked = spin_trylock((lock)); \
278 if (!__locked) \
279 migrate_enable(); \
280 } \
281 __locked; \
282 })
283
284#define __local_trylock_irqsave(lock, flags) \
285 ({ \
286 typecheck(unsigned long, flags); \
287 flags = 0; \
288 __local_trylock(lock); \
289 })
290
291/* migration must be disabled before calling __local_lock_is_locked */
292#define __local_lock_is_locked(__lock) \
293 (rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
294
295#endif /* CONFIG_PREEMPT_RT */
296