/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 *                                INT_MAX     REFCOUNT_SATURATED   UINT_MAX
 *   0                          (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                     <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 *	int old = atomic_fetch_add_relaxed(1, r);
 *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *	if (old < 0)
 *		atomic_set(r, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object.
 *
 * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is
 * currently 0x400000 (and can't easily be raised in the future beyond
 * FUTEX_TID_MASK). With the current PID limit, if no batched refcounting
 * operations are used and the attacker can't repeatedly trigger kernel oopses
 * in the middle of refcount operations, this makes it impossible for a
 * saturated refcount to leave the saturation range, even if it is possible
 * for multiple uses of the same refcount to nest in the context of a single
 * task:
 *
 *     (UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
 *     0x40000000 / 0x400000 = 0x100 = 256
 *
 * If hundreds of references are added/removed with a single refcounting
 * operation, it may potentially be possible to leave the saturation range; but
 * given the precise timing details involved with the round-robin scheduling of
 * each thread manipulating the refcount and the need to hit the race multiple
 * times in succession, there doesn't appear to be a practical avenue of attack
 * even if using refcount_add() operations with larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures, it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() provide
 * acquire and release ordering for cases when the memory occupied by the
 * object might be reused to store another object. This is important for the
 * cases where secondary validation is required to detect such reuse, e.g.
 * SLAB_TYPESAFE_BY_RCU. The secondary validation checks have to happen after
 * the refcount is taken, hence acquire order is necessary. Similarly, when the
 * object is initialized, all stores to its attributes should be visible before
 * the refcount is set, otherwise a stale attribute value might be used by
 * another task which succeeds in taking a refcount to the new object.
 */

#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/refcount_types.h>
#include <linux/spinlock_types.h>

struct mutex;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

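/*
 * Usage sketch (illustrative only; "struct foo" and its fields are
 * hypothetical, not part of this header): a statically allocated object
 * starts life with one reference via REFCOUNT_INIT():
 *
 *	struct foo {
 *		refcount_t ref;
 *		int data;
 *	};
 *
 *	static struct foo foo_singleton = {
 *		.ref = REFCOUNT_INIT(1),
 *	};
 */
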
enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}

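/*
 * Usage sketch (hypothetical "struct foo" and foo_alloc(), not part of this
 * header): a freshly allocated object is not yet visible to other tasks, so
 * the unordered refcount_set() suffices to establish the first reference:
 *
 *	static struct foo *foo_alloc(void)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *		if (!f)
 *			return NULL;
 *		refcount_set(&f->ref, 1);
 *		return f;
 *	}
 */
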
/**
 * refcount_set_release - set a refcount's value with release ordering
 * @r: the refcount
 * @n: value to which the refcount will be set
 *
 * This function should be used when memory occupied by the object might be
 * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
 *
 * Provides release memory ordering which will order previous memory operations
 * against this store. This ensures all updates to this object are visible
 * once the refcount is set and stale values from the object previously
 * occupying this memory are overwritten with new ones.
 *
 * This function should be called only after the new object is fully
 * initialized. After this call the object should be considered visible to
 * other tasks even if it was not yet added into an object collection normally
 * used to discover it. This is because other tasks might have discovered the
 * object previously occupying the same memory and after memory reuse they can
 * succeed in taking a refcount to the new object and start using it.
 */
static inline void refcount_set_release(refcount_t *r, int n)
{
	atomic_set_release(&r->refs, n);
}

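/*
 * Usage sketch (hypothetical names; assumes a cache created with
 * SLAB_TYPESAFE_BY_RCU): because a concurrent reader may already hold a
 * pointer to the memory from the object that previously occupied it, all
 * fields must be written before the refcount is published with release
 * ordering:
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *
 *	if (!f)
 *		return NULL;
 *	f->key = key;				// initialize everything first...
 *	refcount_set_release(&f->ref, 1);	// ...then publish the count
 */
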
/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

static inline __must_check __signed_wrap
bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
	int old = refcount_read(r);

	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (oldp)
		*oldp = old;

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return __refcount_add_not_zero(i, r, NULL);
}

static inline __must_check __signed_wrap
bool __refcount_add_not_zero_limited_acquire(int i, refcount_t *r, int *oldp,
					     int limit)
{
	int old = refcount_read(r);

	do {
		if (!old)
			break;

		if (i > limit - old) {
			if (oldp)
				*oldp = old;
			return false;
		}
	} while (!atomic_try_cmpxchg_acquire(&r->refs, &old, old + i));

	if (oldp)
		*oldp = old;

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}

static inline __must_check bool
__refcount_inc_not_zero_limited_acquire(refcount_t *r, int *oldp, int limit)
{
	return __refcount_add_not_zero_limited_acquire(1, r, oldp, limit);
}

static inline __must_check __signed_wrap
bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp)
{
	return __refcount_add_not_zero_limited_acquire(i, r, oldp, INT_MAX);
}

/**
 * refcount_add_not_zero_acquire - add a value to a refcount with acquire ordering unless it is 0
 *
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * This function should be used when memory occupied by the object might be
 * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
 *
 * Provides acquire memory ordering on success, it is assumed the caller has
 * guaranteed the object memory to be stable (RCU, etc.). It does provide a
 * control dependency and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc_not_zero_acquire() should instead be used to increment a
 * reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero_acquire(int i, refcount_t *r)
{
	return __refcount_add_not_zero_acquire(i, r, NULL);
}

static inline __signed_wrap
void __refcount_add(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	if (oldp)
		*oldp = old;

	if (unlikely(!old))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
	__refcount_add(i, r, NULL);
}

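/*
 * Usage sketch (hypothetical worker example; foo_queue_work() is not a real
 * API): a batched grant takes one reference per consumer in a single
 * operation, each to be dropped individually by its worker later:
 *
 *	refcount_add(nr_workers, &f->ref);
 *	for (i = 0; i < nr_workers; i++)
 *		foo_queue_work(f);	// each worker ends with a put
 */
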
static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
	return __refcount_add_not_zero(1, r, oldp);
}

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return __refcount_inc_not_zero(r, NULL);
}

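/*
 * Usage sketch (hypothetical "foo_list" and "struct foo"): the classic RCU
 * lookup pattern, where the dependent load provides the ordering and
 * refcount_inc_not_zero() refuses to resurrect an object already on its way
 * to being freed:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(f, &foo_list, node) {
 *		if (f->key == key && refcount_inc_not_zero(&f->ref)) {
 *			rcu_read_unlock();
 *			return f;	// reference held
 *		}
 *	}
 *	rcu_read_unlock();
 *	return NULL;
 */
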
static inline __must_check bool __refcount_inc_not_zero_acquire(refcount_t *r, int *oldp)
{
	return __refcount_add_not_zero_acquire(1, r, oldp);
}

/**
 * refcount_inc_not_zero_acquire - increment a refcount with acquire ordering unless it is 0
 * @r: the refcount to increment
 *
 * Similar to refcount_inc_not_zero(), but provides acquire memory ordering on
 * success.
 *
 * This function should be used when memory occupied by the object might be
 * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
 *
 * Provides acquire memory ordering on success, it is assumed the caller has
 * guaranteed the object memory to be stable (RCU, etc.). It does provide a
 * control dependency and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero_acquire(refcount_t *r)
{
	return __refcount_inc_not_zero_acquire(r, NULL);
}

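/*
 * Usage sketch (hypothetical foo_lookup()/foo_put(); assumes the cache is
 * SLAB_TYPESAFE_BY_RCU): the acquire ordering guarantees the secondary
 * validation below reads the object's identity only after the reference is
 * taken, so reuse of the memory for a different object is detected and the
 * reference dropped:
 *
 *	rcu_read_lock();
 *	f = foo_lookup(key);
 *	if (f && refcount_inc_not_zero_acquire(&f->ref)) {
 *		if (f->key != key) {	// memory was reused; revalidate
 *			foo_put(f);
 *			f = NULL;
 *		}
 *	}
 *	rcu_read_unlock();
 */
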
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
	__refcount_add(1, r, oldp);
}

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
	__refcount_inc(r, NULL);
}

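/*
 * Usage sketch (hypothetical foo_get()): the plain increment is only safe
 * when the caller already holds a reference keeping the object alive:
 *
 *	static struct foo *foo_get(struct foo *f)
 *	{
 *		refcount_inc(&f->ref);
 *		return f;
 *	}
 */
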
static inline __must_check __signed_wrap
bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (oldp)
		*oldp = old;

	if (old > 0 && old == i) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old <= 0 || old - i < 0))
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

	return false;
}

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return __refcount_sub_and_test(i, r, NULL);
}

static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
	return __refcount_sub_and_test(1, r, oldp);
}

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return __refcount_dec_and_test(r, NULL);
}

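/*
 * Usage sketch (hypothetical foo_put()): the canonical put path, where the
 * acquire ordering on the final 1->0 transition makes it safe to tear the
 * object down (use kfree_rcu() instead if RCU readers may still hold a
 * pointer):
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_test(&f->ref))
 *			kfree(f);
 *	}
 */
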
static inline void __refcount_dec(refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(1, &r->refs);

	if (oldp)
		*oldp = old;

	if (unlikely(old <= 1))
		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	__refcount_dec(r, NULL);
}

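/*
 * Usage sketch: refcount_dec() fits callers that can prove theirs is not the
 * last reference, e.g. because another, longer-lived reference to the same
 * object is known to exist (hypothetical example):
 *
 *	// foo_list itself still holds a reference to f, so this
 *	// cannot be the final put
 *	refcount_dec(&f->ref);
 */
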
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
						       spinlock_t *lock,
						       unsigned long *flags) __cond_acquires(lock);
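
/*
 * Usage sketch (hypothetical foo_list_lock/foo_list): the dec-and-lock
 * helpers avoid taking the lock on anything but the final put, closing the
 * race between dropping the last reference and unlinking the object:
 *
 *	static void foo_put_locked(struct foo *f)
 *	{
 *		if (refcount_dec_and_lock(&f->ref, &foo_list_lock)) {
 *			list_del(&f->node);
 *			spin_unlock(&foo_list_lock);
 *			kfree(f);
 *		}
 *	}
 */
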
#endif /* _LINUX_REFCOUNT_H */