// SPDX-License-Identifier: GPL-2.0-only
/*
 * ratelimit.c - Do something with rate limit.
 *
 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
 *
 * 2008-05-01 rewrite the function and use a ratelimit_state data struct as
 * parameter. Now every user can use their own standalone ratelimit_state.
 */

#include <linux/ratelimit.h>
#include <linux/jiffies.h>
#include <linux/export.h>

/*
 * __ratelimit - rate limiting
 * @rs: ratelimit_state data
 * @func: name of calling function
 *
 * This enforces a rate limit: not more than @rs->burst callbacks
 * in every @rs->interval
 *
 * RETURNS:
 * 0 means callbacks will be suppressed.
 * 1 means go ahead and do it.
 */
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
	/* Paired with WRITE_ONCE() in .proc_handler().
	 * Changing two values separately could be inconsistent
	 * and some messages could be lost. (See: net_ratelimit_state).
	 */
	int interval = READ_ONCE(rs->interval);
	int burst = READ_ONCE(rs->burst);
	unsigned long flags;
	int ret = 0;

	/*
	 * Zero interval says never limit, otherwise, non-positive burst
	 * says always limit.
	 */
	if (interval <= 0 || burst <= 0) {
		WARN_ONCE(interval < 0 || burst < 0,
			  "Negative interval (%d) or burst (%d): Uninitialized ratelimit_state structure?\n",
			  interval, burst);
		ret = interval == 0 || burst > 0;
		if (!(READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED) || (!interval && !burst) ||
		    !raw_spin_trylock_irqsave(&rs->lock, flags))
			goto nolock_ret;

		/* Force re-initialization once re-enabled. */
		rs->flags &= ~RATELIMIT_INITIALIZED;
		goto unlock_ret;
	}

	/*
	 * If we contend on this state's lock then just check if
	 * the current burst is used or not. It might cause
	 * false positive when we are past the interval and
	 * the current lock owner is just about to reset it.
	 */
	if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
		if (READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED &&
		    atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
			ret = 1;
		goto nolock_ret;
	}

	if (!(rs->flags & RATELIMIT_INITIALIZED)) {
		rs->begin = jiffies;
		rs->flags |= RATELIMIT_INITIALIZED;
		atomic_set(&rs->rs_n_left, rs->burst);
	}

	if (time_is_before_jiffies(rs->begin + interval)) {
		int m;

		/*
		 * Reset rs_n_left ASAP to reduce false positives
		 * in parallel calls, see above.
		 */
		atomic_set(&rs->rs_n_left, rs->burst);
		rs->begin = jiffies;

		if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
			m = ratelimit_state_reset_miss(rs);
			if (m) {
				printk_deferred(KERN_WARNING
						"%s: %d callbacks suppressed\n", func, m);
			}
		}
	}

	/* Note that the burst might be taken by a parallel call. */
	if (atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
		ret = 1;

unlock_ret:
	raw_spin_unlock_irqrestore(&rs->lock, flags);

nolock_ret:
	if (!ret)
		ratelimit_state_inc_miss(rs);

	return ret;
}
EXPORT_SYMBOL(___ratelimit);
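
/*
 * Example usage (an illustrative sketch, not part of the original file; the
 * state name "my_rs" and the message are made up). Callers normally go
 * through the __ratelimit() wrapper in <linux/ratelimit.h>, which passes
 * __func__ as @func:
 *
 *	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);
 *
 *	if (__ratelimit(&my_rs))
 *		pr_warn("something noisy happened\n");
 *
 * This allows at most 10 pr_warn() calls in each 5 second interval and
 * suppresses the rest, reporting the number of suppressed callbacks once
 * the next interval begins.
 */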