/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cleanup.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)			\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifndef CONFIG_PREEMPT_RT

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline, causing
 * cacheline bouncing problems.
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners, as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the cpu.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};
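
/*
 * Illustrative sketch (not part of this header, all field names are
 * hypothetical): when embedding an rwsem, keep frequently-written fields
 * of the containing structure away from it, per the layout advice above.
 *
 *	struct my_object {
 *		struct rw_semaphore sem;	(count/owner get spun on)
 *		struct list_head entries;	(protected by sem)
 *		...
 *		atomic_t hot_stat ____cacheline_aligned;
 *	};
 */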

#define RWSEM_UNLOCKED_VALUE		0UL
#define RWSEM_WRITER_LOCKED		(1UL << 0)
#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
}

static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}

static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname)	.magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname)	.osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_COUNT_INIT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  __RWSEM_OPT_INIT(name)				\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  __RWSEM_DEBUG_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
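
/*
 * Usage sketch (names are illustrative): a statically allocated rwsem can
 * use DECLARE_RWSEM(); a dynamically allocated one must go through
 * init_rwsem() so that lockdep gets a lock class key for it.
 *
 *	static DECLARE_RWSEM(config_rwsem);
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (foo)
 *			init_rwsem(&foo->sem);
 *		return foo;
 *	}
 */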

/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic meant to be called by somebody already holding the
 * rwsem to see if somebody from an incompatible type is wanting access to the
 * lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}
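
/*
 * Usage sketch (illustrative only): a long-running read-side loop can use
 * rwsem_is_contended() as a hint to drop and re-take the lock so that a
 * blocked writer gets a chance to run. more_work()/do_some_work() are
 * hypothetical helpers.
 *
 *	down_read(&sem);
 *	while (more_work()) {
 *		do_some_work();
 *		if (rwsem_is_contended(&sem)) {
 *			up_read(&sem);
 *			cond_resched();
 *			down_read(&sem);
 *		}
 *	}
 *	up_read(&sem);
 */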

#if defined(CONFIG_DEBUG_RWSEMS) || defined(CONFIG_DETECT_HUNG_TASK_BLOCKER)
/*
 * Return just the real task structure pointer of the owner
 */
extern struct task_struct *rwsem_owner(struct rw_semaphore *sem);

/*
 * Return true if the rwsem is owned by a reader.
 */
extern bool is_rwsem_reader_owned(struct rw_semaphore *sem);
#endif

#else /* !CONFIG_PREEMPT_RT */

#include <linux/rwbase_rt.h>

struct rw_semaphore {
	struct rwbase_rt	rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name)				\
	{							\
		.rwbase = __RWBASE_INITIALIZER(name),		\
		__RWSEM_DEP_MAP_INIT(name)			\
	}

#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)

static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
{
	return rw_base_is_locked(&sem->rwbase);
}

static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rwsem_is_locked(sem));
}

static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}

static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return rw_base_is_contended(&sem->rwbase);
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * The functions below are the same for all rwsem implementations including
 * the RT specific variant.
 */

static inline void rwsem_assert_held(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held(sem);
	else
		rwsem_assert_held_nolockdep(sem);
}

static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held_write(sem);
	else
		rwsem_assert_held_write_nolockdep(sem);
}
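
/*
 * Usage sketch (illustrative, frob_object() is hypothetical): internal
 * helpers that rely on the caller holding the lock can document and check
 * that requirement with these assertions, which use lockdep when it is
 * available and fall back to a count-based check otherwise.
 *
 *	static void frob_object(struct foo *foo)
 *	{
 *		rwsem_assert_held_write(&foo->sem);
 *		foo->state = new_state;
 *	}
 */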

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);
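
/*
 * Usage sketch (illustrative): readers may run concurrently with each
 * other, while a writer gets exclusive access.
 *
 *	down_read(&sem);
 *	val = shared->field;		(many readers in parallel)
 *	up_read(&sem);
 *
 *	down_write(&sem);
 *	shared->field = new_val;	(exclusive)
 *	up_write(&sem);
 */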

DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0)

DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
DEFINE_GUARD_COND(rwsem_write, _kill, down_write_killable(_T), _RET == 0)
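
/*
 * Usage sketch (illustrative, struct foo is hypothetical): the guards
 * above enable scope-based lock/unlock via <linux/cleanup.h>, so the
 * unlock cannot be forgotten on an error path.
 *
 *	int read_val(struct foo *foo)
 *	{
 *		guard(rwsem_read)(&foo->sem);
 *		return foo->val;		(up_read() on scope exit)
 *	}
 *
 *	scoped_guard(rwsem_write, &foo->sem)
 *		foo->val = 42;
 */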

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);
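
/*
 * Usage sketch (illustrative, the helpers are hypothetical):
 * downgrade_write() atomically converts a held write lock into a read
 * lock without opening a window for another writer in between.
 *
 *	down_write(&sem);
 *	update_shared_state();
 *	downgrade_write(&sem);
 *	consume_shared_state();		(other readers may join now)
 *	up_read(&sem);
 */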

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)
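
/*
 * Usage sketch (illustrative, parent/child are hypothetical): when two
 * rwsems of the same lock class must be held at once in a well-defined
 * order, tell lockdep about it with explicit subclasses.
 *
 *	down_write_nested(&parent->sem, 0);
 *	down_write_nested(&child->sem, SINGLE_DEPTH_NESTING);
 *	...
 *	up_write(&child->sem);
 *	up_write(&parent->sem);
 */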

/*
 * Take/release a lock when the owner is not the task that will release it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock)		down_write(sem)
# define down_write_nested(sem, subclass)		down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)			down_read(sem)
# define up_read_non_owner(sem)				up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */