/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/atomic.h>
#include <linux/bug.h>
|---|
/*
 *  bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
|---|
/*
 * bit_spin_lock - acquire the spinlock embedded in bit @bitnum of @addr,
 * spinning until it is taken.
 *
 * Preemption is disabled for the whole time the lock is held; the matching
 * bit_spin_unlock()/__bit_spin_unlock() re-enables it.  On UP without
 * CONFIG_DEBUG_SPINLOCK, disabling preemption alone is sufficient and no
 * atomic operation is emitted at all.
 */
static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		/*
		 * Lost the race: re-enable preemption while we spin
		 * read-only on the bit, so the holder (or anyone else)
		 * can run on this CPU.
		 */
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		/* Bit looks free - disable preemption and retry the atomic. */
		preempt_disable();
	}
#endif
	/* sparse lock-context annotation only; generates no code */
	__acquire(bitlock);
}
|---|
/*
 * Return true if it was acquired
 */
|---|
/*
 * bit_spin_trylock - try once to acquire the bit lock at @bitnum in @addr.
 *
 * Returns 1 and leaves preemption disabled on success; returns 0 with
 * preemption state restored if the lock was already held.  On UP without
 * CONFIG_DEBUG_SPINLOCK the atomic test is compiled out and the trylock
 * always succeeds (preemption disable is the lock).
 */
static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		/* Already held: undo the preempt_disable() and fail. */
		preempt_enable();
		return 0;
	}
#endif
	/* sparse lock-context annotation only; generates no code */
	__acquire(bitlock);
	return 1;
}
|---|
/*
 *  bit-based spin_unlock()
 */
|---|
| 57 | static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr) | 
|---|
| 58 | { | 
|---|
| 59 | #ifdef CONFIG_DEBUG_SPINLOCK | 
|---|
| 60 | BUG_ON(!test_bit(bitnum, addr)); | 
|---|
| 61 | #endif | 
|---|
| 62 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 
|---|
| 63 | clear_bit_unlock(nr: bitnum, addr); | 
|---|
| 64 | #endif | 
|---|
| 65 | preempt_enable(); | 
|---|
| 66 | __release(bitlock); | 
|---|
| 67 | } | 
|---|
/*
 *  bit-based spin_unlock()
 *  non-atomic version, which can be used eg. if the bit lock itself is
 *  protecting the rest of the flags in the word.
 */
|---|
| 74 | static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr) | 
|---|
| 75 | { | 
|---|
| 76 | #ifdef CONFIG_DEBUG_SPINLOCK | 
|---|
| 77 | BUG_ON(!test_bit(bitnum, addr)); | 
|---|
| 78 | #endif | 
|---|
| 79 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 
|---|
| 80 | __clear_bit_unlock(nr: bitnum, addr); | 
|---|
| 81 | #endif | 
|---|
| 82 | preempt_enable(); | 
|---|
| 83 | __release(bitlock); | 
|---|
| 84 | } | 
|---|
/*
 * Return true if the lock is held.
 */
|---|
/*
 * bit_spin_is_locked - report whether the bit lock at @bitnum in @addr
 * is currently held.
 *
 * On SMP (or with spinlock debugging) the lock bit itself is tested.
 * On preemptible UP the lock is "held" whenever preemption is disabled,
 * so the preempt count is the best available approximation.  On
 * non-preemptible UP there is no state to inspect, so it conservatively
 * reports locked; callers only use this for assertions.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT_COUNT
	return preempt_count();
#else
	return 1;
#endif
}
|---|
| 99 |  | 
|---|
| 100 | #endif /* __LINUX_BIT_SPINLOCK_H */ | 
|---|
| 101 |  | 
|---|
| 102 |  | 
|---|