/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains
 *  long-lived information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference count goes to 0.
 *  Once that happens, a node may be removed after a sufficient amount of
 *  time has passed since its last use.  A less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks based on creating hash collisions.
 *  A huge number of long-lived nodes in a single hash slot would
 *  significantly delay lookups performed with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND the reference count being 0.
 *  3.  The per-base entry count (base->total) is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *		rb_node: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *		daddr: unchangeable
 */

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_IPV6_MOD_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	u64 nr_entries;

	/* 1% of physical memory */
	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);

	peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
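
/*
 * Illustrative sizing (added commentary, not part of the original
 * source): on a machine with 4 GiB of RAM, and assuming struct
 * inet_peer rounds up to 192 bytes under L1_CACHE_ALIGN(), nr_entries
 * comes to 2^32 / (100 * 192) ~= 223696, which clamp_val() above caps
 * at the upper bound of 65536 + 128 = 65664 entries.
 */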

/* Called with rcu_read_lock() or base->lock held */
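/*
 * Added commentary: lookup() serves both phases of inet_getpeer().
 * The lockless phase runs under RCU with @gc_stack == NULL and uses
 * @seq to bail out via read_seqretry() if a writer is concurrently
 * rebalancing the tree. The locked phase passes a @gc_stack, and up to
 * PEER_MAX_GC nodes visited on the way down are recorded as candidates
 * for inet_peer_gc().
 */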
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;
	u32 now;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			now = jiffies;
			if (READ_ONCE(p->dtime) != now)
				WRITE_ONCE(p->dtime, now);
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

/* perform garbage collection on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			kfree_rcu(p, rcu);
		}
	}
}
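
/*
 * Worked example of the TTL interpolation above (added commentary, not
 * part of the original source): with the defaults minttl = 120 * HZ and
 * maxttl = 600 * HZ, a pool at half of inet_peer_threshold gets
 * ttl = 600*HZ - 480 * (total / threshold) * HZ ~= 360 * HZ, so entries
 * idle for more than about six minutes become eligible for removal; at
 * or above the threshold, ttl drops to 0 and any stacked entry whose
 * refcount can be dropped to zero is freed.
 */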

/* Must be called under RCU: no refcount change is done here. */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);

	if (p)
		return p;

	/* Retry an exact lookup, this time taking the lock first.
	 * At least the nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 1);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough that the
			 * first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_IPV6_MOD_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
	if (refcount_dec_and_test(&p->refcnt))
		kfree_rcu(p, rcu);
}
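
/*
 * Typical caller pattern (an illustrative sketch, not code from this
 * file): inet_getpeer() takes no reference, so a caller that needs the
 * peer beyond the RCU section must grab one itself, e.g.:
 *
 *	rcu_read_lock();
 *	peer = inet_getpeer(base, &daddr);
 *	if (peer && !refcount_inc_not_zero(&peer->refcnt))
 *		peer = NULL;
 *	rcu_read_unlock();
 *	if (peer) {
 *		... use peer outside the RCU section ...
 *		inet_putpeer(peer);
 *	}
 */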

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations. This means the rate-limiting information
 *	for one "ip object" is shared - and these ICMPs are limited twice:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token, otoken, delta;
	bool rc = false;

	if (!peer)
		return true;

	token = otoken = READ_ONCE(peer->rate_tokens);
	now = jiffies;
	delta = now - READ_ONCE(peer->rate_last);
	if (delta) {
		WRITE_ONCE(peer->rate_last, now);
		token += delta;
		if (token > XRLIM_BURST_FACTOR * timeout)
			token = XRLIM_BURST_FACTOR * timeout;
	}
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	if (token != otoken)
		WRITE_ONCE(peer->rate_tokens, token);
	return rc;
}
EXPORT_IPV6_MOD(inet_peer_xrlim_allow);
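
/*
 * Worked example (added commentary): with timeout = HZ, a peer earns
 * one jiffy of credit per elapsed jiffy, capped at 6 * HZ by
 * XRLIM_BURST_FACTOR. A long-idle peer may therefore send a burst of
 * up to six messages back-to-back, after which it is limited to one
 * message per second, each send consuming 'timeout' worth of tokens.
 */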

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_IPV6_MOD(inetpeer_invalidate_tree);