/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

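/*
 * The lockless variant works by updating the lock word and the count
 * with one 64-bit cmpxchg, which is why the two are overlaid on an
 * aligned_u64 below and why it is only enabled when spinlock_t fits
 * in 4 bytes (SPINLOCK_SIZE comes from <generated/bounds.h> and grows
 * under lock debugging), on SMP, and on architectures that opt in via
 * CONFIG_ARCH_USE_CMPXCHG_LOCKREF.
 */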
struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
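
/*
 * With USE_CMPXCHG_LOCKREF, the fast paths speculate on the whole
 * 64-bit word instead of taking the lock.  A simplified sketch of the
 * increment fast path (the real loop lives in lib/lockref.c and also
 * bounds the number of retries):
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		new.lock_count = old.lock_count;
 *		new.count++;
 *		if (try_cmpxchg64_relaxed(&lockref->lock_count,
 *					  &old.lock_count, new.lock_count))
 *			return;		// count bumped without locking
 *	}
 *	// lock was held (or we kept losing races): fall back to
 *	// spin_lock(&lockref->lock); lockref->count++; spin_unlock(...)
 */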

/**
 * lockref_init - Initialize a lockref
 * @lockref: pointer to lockref structure
 *
 * Initializes @lockref->lock and sets @lockref->count to 1.
 */
static inline void lockref_init(struct lockref *lockref)
{
	spin_lock_init(&lockref->lock);
	lockref->count = 1;
}

void lockref_get(struct lockref *lockref);		/* unconditionally take a reference */
int lockref_put_return(struct lockref *lockref);	/* drop and return new count, or -1 if not done locklessly */
bool lockref_get_not_zero(struct lockref *lockref);	/* take a reference unless the count is zero or dead */
bool lockref_put_or_lock(struct lockref *lockref);	/* drop, or return false with the lock held if count <= 1 */
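
/*
 * Illustrative get/put pattern (not part of this API; struct my_obj
 * and my_obj_free() are hypothetical):
 *
 *	// lookup side: fails once the count has dropped to zero
 *	if (!lockref_get_not_zero(&obj->ref))
 *		return NULL;
 *
 *	// release side
 *	if (lockref_put_or_lock(&obj->ref))
 *		return;			// not the last reference
 *	// count was 1 and the lock is now held
 *	obj->ref.count = 0;
 *	spin_unlock(&obj->ref.lock);
 *	my_obj_free(obj);
 */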

void lockref_mark_dead(struct lockref *lockref);	/* mark dead; caller must hold the lock */
bool lockref_get_not_dead(struct lockref *lockref);	/* take a reference unless marked dead */
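
/*
 * "Dead" lockrefs: an object being torn down can mark its lockref dead
 * under the lock (lib/lockref.c stores a negative count), so lockless
 * lookups fail cleanly instead of resurrecting the object.  A sketch,
 * with obj hypothetical as above:
 *
 *	// teardown side
 *	spin_lock(&obj->ref.lock);
 *	lockref_mark_dead(&obj->ref);
 *	spin_unlock(&obj->ref.lock);
 *
 *	// lookup side
 *	if (!lockref_get_not_dead(&obj->ref))
 *		return NULL;		// object is going away
 */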

/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}

#endif /* __LINUX_LOCKREF_H */