// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/bitfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
#include <linux/iov_iter.h>
#include <linux/crc32.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
#include <net/psp/types.h>
#include <net/dropreason.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/textsearch.h>

#include "dev.h"
#include "devmem.h"
#include "netmem_priv.h"
#include "sock_destructor.h"

#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif

#define GRO_MAX_HEAD_PAD (GRO_MAX_HEAD + NET_SKB_PAD + NET_IP_ALIGN)
#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(max(MAX_TCP_HEADER, \
					       GRO_MAX_HEAD_PAD))

/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
 * size, and we can differentiate heads from skb_small_head_cache
 * vs system slabs by looking at their size (skb_end_offset()).
 */
#define SKB_SMALL_HEAD_CACHE_SIZE					\
	(is_power_of_2(SKB_SMALL_HEAD_SIZE) ?			\
		(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) :	\
		SKB_SMALL_HEAD_SIZE)

#define SKB_SMALL_HEAD_HEADROOM						\
	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)

/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
 * iov_iter_bvec(). These static asserts ensure the cast is valid as long
 * as the netmem is a page.
 */
static_assert(offsetof(struct bio_vec, bv_page) ==
	      offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
	      sizeof_field(skb_frag_t, netmem));

static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
	      sizeof_field(skb_frag_t, len));

static_assert(offsetof(struct bio_vec, bv_offset) ==
	      offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
	      sizeof_field(skb_frag_t, offset));

#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);

/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);

/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);
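
/* Illustrative sketch (not part of this file): a subsystem that has
 * reserved a SKB_DROP_REASON_SUBSYS_* slot could publish its own reason
 * strings much like the core list above. All names below are
 * hypothetical.
 *
 *	static const char * const foo_drop_reasons[] = {
 *		"FOO_BAD_HDR",
 *		"FOO_QUEUE_FULL",
 *	};
 *	static const struct drop_reason_list foo_drop_reason_list = {
 *		.reasons = foo_drop_reasons,
 *		.n_reasons = ARRAY_SIZE(foo_drop_reasons),
 *	};
 *
 *	drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_FOO,
 *				     &foo_drop_reason_list);
 *	...
 *	drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_FOO);
 */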

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

struct napi_alloc_cache {
	local_lock_t bh_lock;
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	data = __page_frag_alloc_align(&nc->page, fragsz,
				       GFP_ATOMIC | __GFP_NOWARN, align_mask);
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
	return data;
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	void *data;

	if (in_hardirq() || irqs_disabled()) {
		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);

		fragsz = SKB_DATA_ALIGN(fragsz);
		data = __page_frag_alloc_align(nc, fragsz,
					       GFP_ATOMIC | __GFP_NOWARN,
					       align_mask);
	} else {
		local_bh_disable();
		data = __napi_alloc_frag_align(fragsz, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);
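
/* Illustrative sketch (not part of this file): drivers normally reach
 * these helpers through the netdev_alloc_frag()/napi_alloc_frag()
 * wrappers declared in <linux/skbuff.h>, e.g. to carve an RX buffer out
 * of the per-CPU page fragment cache (bufsz is assumed):
 *
 *	void *buf = netdev_alloc_frag(SKB_DATA_ALIGN(bufsz) +
 *				      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
 *	if (!buf)
 *		return -ENOMEM;
 *	// later: hand it to build_skb(buf, ...) or free with skb_free_frag(buf)
 */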

static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	if (unlikely(!nc->skb_count)) {
		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
						      GFP_ATOMIC | __GFP_NOWARN,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
		if (unlikely(!nc->skb_count)) {
			local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
			return NULL;
		}
	}

	skb = nc->skb_cache[--nc->skb_count];
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
	kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));

	return skb;
}

/**
 * napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache
 * @skbs: pointer to an at least @n-sized array to fill with skb pointers
 * @n: number of entries to provide
 *
 * Tries to obtain @n &sk_buff entries from the NAPI percpu cache and writes
 * the pointers into the provided array @skbs. If there are fewer entries
 * available, tries to replenish the cache and bulk-allocates the diff from
 * the MM layer if needed.
 * The heads are being zeroed with either memset() or %__GFP_ZERO, so they are
 * ready for {,__}build_skb_around() and don't have any data buffers attached.
 * Must be called *only* from the BH context.
 *
 * Return: number of successfully allocated skbs (@n if no actual allocation
 *	   needed or kmem_cache_alloc_bulk() didn't fail).
 */
u32 napi_skb_cache_get_bulk(void **skbs, u32 n)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 bulk, total = n;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);

	if (nc->skb_count >= n)
		goto get;

	/* Not enough cached skbs. Try refilling the cache first */
	bulk = min(NAPI_SKB_CACHE_SIZE - nc->skb_count, NAPI_SKB_CACHE_BULK);
	nc->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
					       GFP_ATOMIC | __GFP_NOWARN, bulk,
					       &nc->skb_cache[nc->skb_count]);
	if (likely(nc->skb_count >= n))
		goto get;

	/* Still not enough. Bulk-allocate the missing part directly, zeroed */
	n -= kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
				   GFP_ATOMIC | __GFP_ZERO | __GFP_NOWARN,
				   n - nc->skb_count, &skbs[nc->skb_count]);
	if (likely(nc->skb_count >= n))
		goto get;

	/* kmem_cache didn't allocate the number we need, limit the output */
	total -= n - nc->skb_count;
	n = nc->skb_count;

get:
	for (u32 base = nc->skb_count - n, i = 0; i < n; i++) {
		u32 cache_size = kmem_cache_size(net_hotdata.skbuff_cache);

		skbs[i] = nc->skb_cache[base + i];

		kasan_mempool_unpoison_object(skbs[i], cache_size);
		memset(skbs[i], 0, offsetof(struct sk_buff, tail));
	}

	nc->skb_count -= n;
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);

	return total;
}
EXPORT_SYMBOL_GPL(napi_skb_cache_get_bulk);
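
/* Illustrative sketch (not part of this file): a BH-context caller could
 * attach pre-allocated data buffers (bufs[] and bufsz are assumed to
 * exist) to a batch of zeroed heads:
 *
 *	void *skbs[8];
 *	u32 i, got = napi_skb_cache_get_bulk(skbs, ARRAY_SIZE(skbs));
 *
 *	for (i = 0; i < got; i++)
 *		build_skb_around(skbs[i], bufs[i], bufsz);
 */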

static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
					 unsigned int size)
{
	struct skb_shared_info *shinfo;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb_set_end_offset(skb, size);
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;
	skb->alloc_cpu = raw_smp_processor_id();
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

static inline void *__slab_build_skb(void *data, unsigned int *size)
{
	void *resized;

	/* Must find the allocation size (and grow it to match). */
	*size = ksize(data);
	/* krealloc() will immediately return "data" when
	 * "ksize(data)" is requested: it is the existing upper
	 * bound. As a result, GFP_ATOMIC will be ignored. Note
	 * that this "new" pointer needs to be passed back to the
	 * caller for use so the __alloc_size hinting will be
	 * tracked correctly.
	 */
	resized = krealloc(data, *size, GFP_ATOMIC);
	WARN_ON_ONCE(resized != data);
	return resized;
}

/* build_skb() variant which can operate on slab buffers.
 * Note that this should be used sparingly as slab buffers
 * cannot be combined efficiently by GRO!
 */
struct sk_buff *slab_build_skb(void *data)
{
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	data = __slab_build_skb(data, &size);
	__finalize_skb_around(skb, data, size);

	return skb;
}
EXPORT_SYMBOL(slab_build_skb);
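
/* Illustrative sketch (not part of this file): wrapping a kmalloc()ed
 * buffer that already holds a received frame (frame_len is assumed). The
 * buffer must be large enough for the data plus skb_shared_info overhead:
 *
 *	void *buf = kmalloc(SKB_HEAD_ALIGN(frame_len), GFP_ATOMIC);
 *	// ... copy or DMA the frame into buf ...
 *	struct sk_buff *skb = slab_build_skb(buf);
 *
 *	if (!skb)
 *		kfree(buf);		// on success the skb owns buf
 *	else
 *		skb_put(skb, frame_len);
 */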

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	unsigned int size = frag_size;

	/* frag_size == 0 is considered deprecated now. Callers
	 * using slab buffers should use slab_build_skb() instead.
	 */
	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
		data = __slab_build_skb(data, &size);

	__finalize_skb_around(skb, data, size);
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data (must not be 0)
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated from the page
 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
 * allocation is deprecated, and callers should use slab_build_skb()
 * instead.)
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is a wrapper over __build_skb(), that specifically
 * takes care of skb->head_frag and skb->pfmemalloc.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (likely(skb && frag_size)) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
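
/* Illustrative sketch (not part of this file): the typical RX path of a
 * build_skb()-based driver, following the notes above. The rx_buf layout
 * and length variables are hypothetical.
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + buf_len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	struct sk_buff *skb = build_skb(rx_buf, truesize);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);	// headroom added before DMA
 *		skb_put(skb, frame_len);	// bytes the NIC wrote
 *		skb->protocol = eth_type_trans(skb, netdev);
 *		napi_gro_receive(napi, skb);
 *	}
 */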

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);
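
/* Illustrative sketch (not part of this file): napi_build_skb() is the
 * variant to prefer inside a NAPI poll loop, e.g. when wrapping a
 * page_pool buffer (the va/truesize/headroom variables are assumed):
 *
 *	struct sk_buff *skb = napi_build_skb(va, truesize);
 *
 *	if (skb) {
 *		skb_mark_for_recycle(skb);	// head returns to the pool
 *		skb_reserve(skb, headroom);
 *		skb_put(skb, frame_len);
 *	}
 */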

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	bool ret_pfmemalloc = false;
	size_t obj_size;
	void *obj;

	obj_size = SKB_HEAD_ALIGN(*size);
	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
					    flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					    node);
		*size = SKB_SMALL_HEAD_CACHE_SIZE;
		if (obj || !(gfp_pfmemalloc_allowed(flags)))
			goto out;
		/* Try again but now we are using pfmemalloc reserves */
		ret_pfmemalloc = true;
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
		goto out;
	}

	obj_size = kmalloc_size_roundup(obj_size);
	/* The following cast might truncate high-order bits of obj_size, this
	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
	 */
	*size = (unsigned int)obj_size;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(obj_size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(obj_size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	bool pfmemalloc;
	u8 *data;

	cache = (flags & SKB_ALLOC_FCLONE)
		? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc_size_roundup() might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	prefetchw(data + SKB_WITH_OVERHEAD(size));

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, size);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
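
/* Illustrative sketch (not part of this file): most callers use the
 * alloc_skb() wrapper from <linux/skbuff.h> and then carve up the buffer
 * with skb_reserve()/skb_put(), e.g. to emit a small control packet
 * (hlen, dlen and payload are assumed):
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			// headroom for headers
 *	skb_put_data(skb, payload, dlen);	// copy in the payload
 */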

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
	} else {
		local_bh_disable();
		local_lock_nested_bh(&napi_alloc_cache.bh_lock);

		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);

		local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
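
/* Illustrative sketch (not part of this file): drivers usually call the
 * netdev_alloc_skb()/netdev_alloc_skb_ip_align() wrappers, which pass
 * GFP_ATOMIC here, e.g. when refilling an RX ring (rx_buf_len assumed):
 *
 *	struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, rx_buf_len);
 *
 *	if (!skb)
 *		break;		// retry on the next refill pass
 *	// map skb->data for DMA and post it to the ring
 */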

/**
 *	napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
{
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN;
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	nc = this_cpu_ptr(&napi_alloc_cache);

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);

	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(napi_alloc_skb);
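
/* Illustrative sketch (not part of this file): a copy-break style RX path
 * inside a driver's NAPI poll handler (the threshold name is assumed):
 *
 *	if (frame_len < COPYBREAK_LEN) {
 *		struct sk_buff *skb = napi_alloc_skb(napi, frame_len);
 *
 *		if (skb) {
 *			skb_put_data(skb, rx_va, frame_len);
 *			napi_gro_receive(napi, skb);
 *		}
 *	}
 */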

void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
			    int off, int size, unsigned int truesize)
{
	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_fill_netmem_desc(skb, i, netmem, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag_netmem);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
		    unsigned int headroom)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	u32 size, truesize, len, max_head_size, off;
	struct sk_buff *skb = *pskb, *nskb;
	int err, i, head_off;
	void *data;

	/* XDP does not support fraglist so we need to linearize
	 * the skb.
	 */
	if (skb_has_frag_list(skb))
		return -EOPNOTSUPP;

	max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
	if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
		return -ENOMEM;

	size = min_t(u32, skb->len, max_head_size);
	truesize = SKB_HEAD_ALIGN(size) + headroom;
	data = page_pool_dev_alloc_va(pool, &truesize);
	if (!data)
		return -ENOMEM;

	nskb = napi_build_skb(data, truesize);
	if (!nskb) {
		page_pool_free_va(pool, data, true);
		return -ENOMEM;
	}

	skb_reserve(nskb, headroom);
	skb_copy_header(nskb, skb);
	skb_mark_for_recycle(nskb);

	err = skb_copy_bits(skb, 0, nskb->data, size);
	if (err) {
		consume_skb(nskb);
		return err;
	}
	skb_put(nskb, size);

	head_off = skb_headroom(nskb) - skb_headroom(skb);
	skb_headers_offset_update(nskb, head_off);

	off = size;
	len = skb->len - off;
	for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
		struct page *page;
		u32 page_off;

		size = min_t(u32, len, PAGE_SIZE);
		truesize = size;

		page = page_pool_dev_alloc(pool, &page_off, &truesize);
		if (!page) {
			consume_skb(nskb);
			return -ENOMEM;
		}

		skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
		err = skb_copy_bits(skb, off, page_address(page) + page_off,
				    size);
		if (err) {
			consume_skb(nskb);
			return err;
		}

		len -= size;
		off += size;
	}

	consume_skb(skb);
	*pskb = nskb;

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
EXPORT_SYMBOL(skb_pp_cow_data);

int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
			 const struct bpf_prog *prog)
{
	if (!prog->aux->xdp_has_frags)
		return -EINVAL;

	return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
}
EXPORT_SYMBOL(skb_cow_data_for_xdp);
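
/* Illustrative sketch (not part of this file): generic-XDP style callers
 * re-buffer an skb into page_pool memory before running a frags-aware
 * program (pool and prog setup assumed):
 *
 *	err = skb_cow_data_for_xdp(pool, &skb, prog);
 *	if (err)
 *		goto drop;	// original skb is left intact on error
 *	// on success, skb now points at the page_pool-backed copy
 */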

#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(netmem_ref netmem)
{
	netmem = netmem_compound_head(netmem);

	if (unlikely(!netmem_is_pp(netmem)))
		return false;

	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);

	return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif

static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
}

/**
 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
 * @skb:	page pool aware skb
 *
 * Increase the fragment reference count (pp_ref_count) of a skb. This is
 * intended to gain fragment references only for page pool aware skbs,
 * i.e. when skb->pp_recycle is true, and not for fragments in a
 * non-pp-recycling skb. It has a fallback to increase references on normal
 * pages, as page pool aware skbs may also have normal page fragments.
 */
static int skb_pp_frag_ref(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	netmem_ref head_netmem;
	int i;

	if (!skb->pp_recycle)
		return -EINVAL;

	shinfo = skb_shinfo(skb);

	for (i = 0; i < shinfo->nr_frags; i++) {
		head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
		if (likely(netmem_is_pp(head_netmem)))
			page_pool_ref_netmem(head_netmem);
		else
			page_ref_inc(netmem_to_page(head_netmem));
	}
	return 0;
}

static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}

static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (!skb_data_unref(skb, shinfo))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling
	 */
	skb->pp_recycle = 0;
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(net_hotdata.skbuff_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		DEBUG_NET_WARN_ON_ONCE(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb, reason);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

static __always_inline
bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
			  enum skb_drop_reason reason)
{
	if (unlikely(!skb_unref(skb)))
		return false;

	DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET ||
			       u32_get_bits(reason,
					    SKB_DROP_REASON_SUBSYS_MASK) >=
				       SKB_DROP_REASON_SUBSYS_NUM);

	if (reason == SKB_CONSUMED)
		trace_consume_skb(skb, __builtin_return_address(0));
	else
		trace_kfree_skb(skb, __builtin_return_address(0), reason, sk);
	return true;
}

/**
 *	sk_skb_reason_drop - free an sk_buff with special reason
 *	@sk: the socket to receive @skb, or NULL if not applicable
 *	@skb: buffer to free
 *	@reason: reason why this skb is dropped
 *
 *	Drop a reference to the buffer and free it if the usage count has hit
 *	zero. Meanwhile, pass the receiving socket and drop reason to the
 *	'kfree_skb' tracepoint.
 */
void __fix_address
sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (__sk_skb_reason_drop(sk, skb, reason))
		__kfree_skb(skb);
}
EXPORT_SYMBOL(sk_skb_reason_drop);
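
/* Illustrative sketch (not part of this file): protocol code typically
 * drops through the kfree_skb_reason()/kfree_skb() wrappers built on top
 * of this helper, so the tracepoint records why the packet died:
 *
 *	if (unlikely(!pskb_may_pull(skb, sizeof(struct udphdr)))) {
 *		kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
 *		return 0;
 *	}
 */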
| 1207 |  | 
|---|
| 1208 | #define KFREE_SKB_BULK_SIZE	16 | 
|---|
| 1209 |  | 
|---|
| 1210 | struct skb_free_array { | 
|---|
| 1211 | unsigned int skb_count; | 
|---|
| 1212 | void *skb_array[KFREE_SKB_BULK_SIZE]; | 
|---|
| 1213 | }; | 
|---|
| 1214 |  | 
|---|
| 1215 | static void kfree_skb_add_bulk(struct sk_buff *skb, | 
|---|
| 1216 | struct skb_free_array *sa, | 
|---|
| 1217 | enum skb_drop_reason reason) | 
|---|
| 1218 | { | 
|---|
| 1219 | /* if SKB is a clone, don't handle this case */ | 
|---|
| 1220 | if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) { | 
|---|
| 1221 | __kfree_skb(skb); | 
|---|
| 1222 | return; | 
|---|
| 1223 | } | 
|---|
| 1224 |  | 
|---|
| 1225 | skb_release_all(skb, reason); | 
|---|
| 1226 | sa->skb_array[sa->skb_count++] = skb; | 
|---|
| 1227 |  | 
|---|
| 1228 | if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { | 
|---|
| 1229 | kmem_cache_free_bulk(s: net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE, | 
|---|
| 1230 | p: sa->skb_array); | 
|---|
| 1231 | sa->skb_count = 0; | 
|---|
| 1232 | } | 
|---|
| 1233 | } | 
|---|
| 1234 |  | 
|---|
| 1235 | void __fix_address | 
|---|
| 1236 | kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason) | 
|---|
| 1237 | { | 
|---|
| 1238 | struct skb_free_array sa; | 
|---|
| 1239 |  | 
|---|
| 1240 | sa.skb_count = 0; | 
|---|
| 1241 |  | 
|---|
| 1242 | while (segs) { | 
|---|
| 1243 | struct sk_buff *next = segs->next; | 
|---|
| 1244 |  | 
|---|
| 1245 | if (__sk_skb_reason_drop(NULL, skb: segs, reason)) { | 
|---|
| 1246 | skb_poison_list(skb: segs); | 
|---|
| 1247 | kfree_skb_add_bulk(skb: segs, sa: &sa, reason); | 
|---|
| 1248 | } | 
|---|
| 1249 |  | 
|---|
| 1250 | segs = next; | 
|---|
| 1251 | } | 
|---|
| 1252 |  | 
|---|
| 1253 | if (sa.skb_count) | 
|---|
| 1254 | kmem_cache_free_bulk(s: net_hotdata.skbuff_cache, size: sa.skb_count, p: sa.skb_array); | 
|---|
| 1255 | } | 
|---|
| 1256 | EXPORT_SYMBOL(kfree_skb_list_reason); | 
|---|
| 1257 |  | 
|---|
| 1258 | /* Dump skb information and contents. | 
|---|
| 1259 | * | 
|---|
| 1260 | * Must only be called from net_ratelimit()-ed paths. | 
|---|
| 1261 | * | 
|---|
| 1262 | * Dumps whole packets if full_pkt, only headers otherwise. | 
|---|
| 1263 | */ | 
|---|
| 1264 | void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) | 
|---|
| 1265 | { | 
|---|
| 1266 | struct skb_shared_info *sh = skb_shinfo(skb); | 
|---|
| 1267 | struct net_device *dev = skb->dev; | 
|---|
| 1268 | struct sock *sk = skb->sk; | 
|---|
| 1269 | struct sk_buff *list_skb; | 
|---|
| 1270 | bool has_mac, has_trans; | 
|---|
| 1271 | int headroom, tailroom; | 
|---|
| 1272 | int i, len, seg_len; | 
|---|
| 1273 |  | 
|---|
| 1274 | if (full_pkt) | 
|---|
| 1275 | len = skb->len; | 
|---|
| 1276 | else | 
|---|
| 1277 | len = min_t(int, skb->len, MAX_HEADER + 128); | 
|---|
| 1278 |  | 
|---|
| 1279 | headroom = skb_headroom(skb); | 
|---|
| 1280 | tailroom = skb_tailroom(skb); | 
|---|
| 1281 |  | 
|---|
| 1282 | has_mac = skb_mac_header_was_set(skb); | 
|---|
| 1283 | has_trans = skb_transport_header_was_set(skb); | 
|---|
| 1284 |  | 
|---|
| 1285 | printk( "%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" | 
|---|
| 1286 | "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n" | 
|---|
| 1287 | "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" | 
|---|
| 1288 | "csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n" | 
|---|
| 1289 | "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n" | 
|---|
| 1290 | "priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n" | 
|---|
| 1291 | "encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n", | 
|---|
| 1292 | level, skb->len, headroom, skb_headlen(skb), tailroom, | 
|---|
| 1293 | has_mac ? skb->mac_header : -1, | 
|---|
| 1294 | has_mac ? skb_mac_header_len(skb) : -1, | 
|---|
| 1295 | skb->mac_len, | 
|---|
| 1296 | skb->network_header, | 
|---|
| 1297 | has_trans ? skb_network_header_len(skb) : -1, | 
|---|
| 1298 | has_trans ? skb->transport_header : -1, | 
|---|
| 1299 | sh->tx_flags, sh->nr_frags, | 
|---|
| 1300 | sh->gso_size, sh->gso_type, sh->gso_segs, | 
|---|
| 1301 | skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed, | 
|---|
| 1302 | skb->csum_complete_sw, skb->csum_valid, skb->csum_level, | 
|---|
| 1303 | skb->hash, skb->sw_hash, skb->l4_hash, | 
|---|
| 1304 | ntohs(skb->protocol), skb->pkt_type, skb->skb_iif, | 
|---|
| 1305 | skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all, | 
|---|
| 1306 | skb->encapsulation, skb->inner_protocol, skb->inner_mac_header, | 
|---|
| 1307 | skb->inner_network_header, skb->inner_transport_header); | 
|---|
| 1308 |  | 
|---|
| 1309 | if (dev) | 
|---|
| 1310 | printk("%sdev name=%s feat=%pNF\n", | 
|---|
| 1311 | level, dev->name, &dev->features); | 
|---|
| 1312 | if (sk) | 
|---|
| 1313 | printk("%ssk family=%hu type=%u proto=%u\n", | 
|---|
| 1314 | level, sk->sk_family, sk->sk_type, sk->sk_protocol); | 
|---|
| 1315 |  | 
|---|
| 1316 | if (full_pkt && headroom) | 
|---|
| 1317 | print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET, | 
|---|
| 1318 | 16, 1, skb->head, headroom, false); | 
|---|
| 1319 |  | 
|---|
| 1320 | seg_len = min_t(int, skb_headlen(skb), len); | 
|---|
| 1321 | if (seg_len) | 
|---|
| 1322 | print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET, | 
|---|
| 1323 | 16, 1, skb->data, seg_len, false); | 
|---|
| 1324 | len -= seg_len; | 
|---|
| 1325 |  | 
|---|
| 1326 | if (full_pkt && tailroom) | 
|---|
| 1327 | print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET, | 
|---|
| 1328 | 16, 1, skb_tail_pointer(skb), tailroom, false); | 
|---|
| 1329 |  | 
|---|
| 1330 | for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { | 
|---|
| 1331 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 
|---|
| 1332 | u32 p_off, p_len, copied; | 
|---|
| 1333 | struct page *p; | 
|---|
| 1334 | u8 *vaddr; | 
|---|
| 1335 |  | 
|---|
| 1336 | if (skb_frag_is_net_iov(frag)) { | 
|---|
| 1337 | printk("%sskb frag %d: not readable\n", level, i); | 
|---|
| 1338 | len -= skb_frag_size(frag); | 
|---|
| 1339 | if (!len) | 
|---|
| 1340 | break; | 
|---|
| 1341 | continue; | 
|---|
| 1342 | } | 
|---|
| 1343 |  | 
|---|
| 1344 | skb_frag_foreach_page(frag, skb_frag_off(frag), | 
|---|
| 1345 | skb_frag_size(frag), p, p_off, p_len, | 
|---|
| 1346 | copied) { | 
|---|
| 1347 | seg_len = min_t(int, p_len, len); | 
|---|
| 1348 | vaddr = kmap_atomic(p); | 
|---|
| 1349 | print_hex_dump(level, "skb frag:     ", | 
|---|
| 1350 | DUMP_PREFIX_OFFSET, | 
|---|
| 1351 | 16, 1, vaddr + p_off, seg_len, false); | 
|---|
| 1352 | kunmap_atomic(vaddr); | 
|---|
| 1353 | len -= seg_len; | 
|---|
| 1354 | if (!len) | 
|---|
| 1355 | break; | 
|---|
| 1356 | } | 
|---|
| 1357 | } | 
|---|
| 1358 |  | 
|---|
| 1359 | if (full_pkt && skb_has_frag_list(skb)) { | 
|---|
| 1360 | printk("skb fraglist:\n"); | 
|---|
| 1361 | skb_walk_frags(skb, list_skb) | 
|---|
| 1362 | skb_dump(level, list_skb, true); | 
|---|
| 1363 | } | 
|---|
| 1364 | } | 
|---|
| 1365 | EXPORT_SYMBOL(skb_dump); | 
|---|
| 1366 |  | 
|---|
| 1367 | /** | 
|---|
| 1368 | *	skb_tx_error - report an sk_buff xmit error | 
|---|
| 1369 | *	@skb: buffer that triggered an error | 
|---|
| 1370 | * | 
|---|
| 1371 | *	Report xmit error if a device callback is tracking this skb. | 
|---|
| 1372 | *	skb must be freed afterwards. | 
|---|
| 1373 | */ | 
|---|
| 1374 | void skb_tx_error(struct sk_buff *skb) | 
|---|
| 1375 | { | 
|---|
| 1376 | if (skb) { | 
|---|
| 1377 | skb_zcopy_downgrade_managed(skb); | 
|---|
| 1378 | skb_zcopy_clear(skb, true); | 
|---|
| 1379 | } | 
|---|
| 1380 | } | 
|---|
| 1381 | EXPORT_SYMBOL(skb_tx_error); | 
|---|
| 1382 |  | 
|---|
| 1383 | #ifdef CONFIG_TRACEPOINTS | 
|---|
| 1384 | /** | 
|---|
| 1385 | *	consume_skb - free an skbuff | 
|---|
| 1386 | *	@skb: buffer to free | 
|---|
| 1387 | * | 
|---|
| 1388 | *	Drop a ref to the buffer and free it if the usage count has hit zero. | 
|---|
| 1389 | *	Functions identically to kfree_skb, but kfree_skb assumes that the frame | 
|---|
| 1390 | *	is being dropped after a failure and notes that | 
|---|
| 1391 | */ | 
|---|
| 1392 | void consume_skb(struct sk_buff *skb) | 
|---|
| 1393 | { | 
|---|
| 1394 | if (!skb_unref(skb)) | 
|---|
| 1395 | return; | 
|---|
| 1396 |  | 
|---|
| 1397 | trace_consume_skb(skb, __builtin_return_address(0)); | 
|---|
| 1398 | __kfree_skb(skb); | 
|---|
| 1399 | } | 
|---|
| 1400 | EXPORT_SYMBOL(consume_skb); | 
|---|
| 1401 | #endif | 
|---|
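|  | /* Illustrative contrast between the two free paths (hypothetical | 
|---|
|  | * driver code, not part of this file): a frame that was actually | 
|---|
|  | * delivered should not fire the drop tracepoint: | 
|---|
|  | * | 
|---|
|  | *	consume_skb(skb);				(normal end of life) | 
|---|
|  | * | 
|---|
|  | * while a frame rejected by validation is a genuine drop: | 
|---|
|  | * | 
|---|
|  | *	kfree_skb_reason(skb, SKB_DROP_REASON_DEV_HDR); | 
|---|
|  | * | 
|---|
|  | * Both free the skb; only the tracepoint emitted differs, which keeps | 
|---|
|  | * drop-monitoring tools free of false positives. | 
|---|
|  | */ | 
|---|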
| 1402 |  | 
|---|
| 1403 | /** | 
|---|
| 1404 | *	__consume_stateless_skb - free an skbuff, assuming it is stateless | 
|---|
| 1405 | *	@skb: buffer to free | 
|---|
| 1406 | * | 
|---|
| 1407 | *	Like consume_skb(), but this variant assumes that this is the last | 
|---|
| 1408 | *	skb reference and all the head states have already been dropped. | 
|---|
| 1409 | */ | 
|---|
| 1410 | void __consume_stateless_skb(struct sk_buff *skb) | 
|---|
| 1411 | { | 
|---|
| 1412 | trace_consume_skb(skb, __builtin_return_address(0)); | 
|---|
| 1413 | skb_release_data(skb, SKB_CONSUMED); | 
|---|
| 1414 | kfree_skbmem(skb); | 
|---|
| 1415 | } | 
|---|
| 1416 |  | 
|---|
| 1417 | static void napi_skb_cache_put(struct sk_buff *skb) | 
|---|
| 1418 | { | 
|---|
| 1419 | struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); | 
|---|
| 1420 | u32 i; | 
|---|
| 1421 |  | 
|---|
| 1422 | if (!kasan_mempool_poison_object(skb)) | 
|---|
| 1423 | return; | 
|---|
| 1424 |  | 
|---|
| 1425 | local_lock_nested_bh(&napi_alloc_cache.bh_lock); | 
|---|
| 1426 | nc->skb_cache[nc->skb_count++] = skb; | 
|---|
| 1427 |  | 
|---|
| 1428 | if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { | 
|---|
| 1429 | for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++) | 
|---|
| 1430 | kasan_mempool_unpoison_object(nc->skb_cache[i], | 
|---|
| 1431 | kmem_cache_size(net_hotdata.skbuff_cache)); | 
|---|
| 1432 |  | 
|---|
| 1433 | kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF, | 
|---|
| 1434 | nc->skb_cache + NAPI_SKB_CACHE_HALF); | 
|---|
| 1435 | nc->skb_count = NAPI_SKB_CACHE_HALF; | 
|---|
| 1436 | } | 
|---|
| 1437 | local_unlock_nested_bh(&napi_alloc_cache.bh_lock); | 
|---|
| 1438 | } | 
|---|
| 1439 |  | 
|---|
| 1440 | void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) | 
|---|
| 1441 | { | 
|---|
| 1442 | skb_release_all(skb, reason); | 
|---|
| 1443 | napi_skb_cache_put(skb); | 
|---|
| 1444 | } | 
|---|
| 1445 |  | 
|---|
| 1446 | void napi_skb_free_stolen_head(struct sk_buff *skb) | 
|---|
| 1447 | { | 
|---|
| 1448 | if (unlikely(skb->slow_gro)) { | 
|---|
| 1449 | nf_reset_ct(skb); | 
|---|
| 1450 | skb_dst_drop(skb); | 
|---|
| 1451 | skb_ext_put(skb); | 
|---|
| 1452 | skb_orphan(skb); | 
|---|
| 1453 | skb->slow_gro = 0; | 
|---|
| 1454 | } | 
|---|
| 1455 | napi_skb_cache_put(skb); | 
|---|
| 1456 | } | 
|---|
| 1457 |  | 
|---|
| 1458 | void napi_consume_skb(struct sk_buff *skb, int budget) | 
|---|
| 1459 | { | 
|---|
| 1460 | /* Zero budget indicates that a non-NAPI context called us, like netpoll */ | 
|---|
| 1461 | if (unlikely(!budget)) { | 
|---|
| 1462 | dev_consume_skb_any(skb); | 
|---|
| 1463 | return; | 
|---|
| 1464 | } | 
|---|
| 1465 |  | 
|---|
| 1466 | DEBUG_NET_WARN_ON_ONCE(!in_softirq()); | 
|---|
| 1467 |  | 
|---|
| 1468 | if (!skb_unref(skb)) | 
|---|
| 1469 | return; | 
|---|
| 1470 |  | 
|---|
| 1471 | /* if reaching here SKB is ready to free */ | 
|---|
| 1472 | trace_consume_skb(skb, __builtin_return_address(0)); | 
|---|
| 1473 |  | 
|---|
| 1474 | /* if SKB is a clone, don't handle this case */ | 
|---|
| 1475 | if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { | 
|---|
| 1476 | __kfree_skb(skb); | 
|---|
| 1477 | return; | 
|---|
| 1478 | } | 
|---|
| 1479 |  | 
|---|
| 1480 | skb_release_all(skb, SKB_CONSUMED); | 
|---|
| 1481 | napi_skb_cache_put(skb); | 
|---|
| 1482 | } | 
|---|
| 1483 | EXPORT_SYMBOL(napi_consume_skb); | 
|---|
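|  | /* Sketch of the intended caller (a hypothetical TX-completion loop, | 
|---|
|  | * not part of this file): the NAPI poll budget is threaded through so | 
|---|
|  | * the per-CPU skb cache is only touched in real NAPI context: | 
|---|
|  | * | 
|---|
|  | *	static int foo_poll(struct napi_struct *napi, int budget) | 
|---|
|  | *	{ | 
|---|
|  | *		while ((skb = foo_next_completed_tx(priv))) | 
|---|
|  | *			napi_consume_skb(skb, budget); | 
|---|
|  | *		... | 
|---|
|  | *	} | 
|---|
|  | * | 
|---|
|  | * foo_poll() and foo_next_completed_tx() are made-up names; netpoll | 
|---|
|  | * calls with budget == 0 and is diverted to dev_consume_skb_any(). | 
|---|
|  | */ | 
|---|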
| 1484 |  | 
|---|
| 1485 | /* Make sure a field is contained by headers group */ | 
|---|
| 1486 | #define CHECK_SKB_FIELD(field) \ | 
|---|
| 1487 | BUILD_BUG_ON(offsetof(struct sk_buff, field) !=		\ | 
|---|
| 1488 | offsetof(struct sk_buff, headers.field));	\ | 
|---|
| 1489 |  | 
|---|
| 1490 | static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | 
|---|
| 1491 | { | 
|---|
| 1492 | new->tstamp		= old->tstamp; | 
|---|
| 1493 | /* We do not copy old->sk */ | 
|---|
| 1494 | new->dev		= old->dev; | 
|---|
| 1495 | memcpy(new->cb, old->cb, sizeof(old->cb)); | 
|---|
| 1496 | skb_dst_copy(new, old); | 
|---|
| 1497 | __skb_ext_copy(new, old); | 
|---|
| 1498 | __nf_copy(new, old, false); | 
|---|
| 1499 |  | 
|---|
| 1500 | /* Note : this field could be in the headers group. | 
|---|
| 1501 | * It is not yet because we do not want to have a 16 bit hole | 
|---|
| 1502 | */ | 
|---|
| 1503 | new->queue_mapping = old->queue_mapping; | 
|---|
| 1504 |  | 
|---|
| 1505 | memcpy(&new->headers, &old->headers, sizeof(new->headers)); | 
|---|
| 1506 | CHECK_SKB_FIELD(protocol); | 
|---|
| 1507 | CHECK_SKB_FIELD(csum); | 
|---|
| 1508 | CHECK_SKB_FIELD(hash); | 
|---|
| 1509 | CHECK_SKB_FIELD(priority); | 
|---|
| 1510 | CHECK_SKB_FIELD(skb_iif); | 
|---|
| 1511 | CHECK_SKB_FIELD(vlan_proto); | 
|---|
| 1512 | CHECK_SKB_FIELD(vlan_tci); | 
|---|
| 1513 | CHECK_SKB_FIELD(transport_header); | 
|---|
| 1514 | CHECK_SKB_FIELD(network_header); | 
|---|
| 1515 | CHECK_SKB_FIELD(mac_header); | 
|---|
| 1516 | CHECK_SKB_FIELD(inner_protocol); | 
|---|
| 1517 | CHECK_SKB_FIELD(inner_transport_header); | 
|---|
| 1518 | CHECK_SKB_FIELD(inner_network_header); | 
|---|
| 1519 | CHECK_SKB_FIELD(inner_mac_header); | 
|---|
| 1520 | CHECK_SKB_FIELD(mark); | 
|---|
| 1521 | #ifdef CONFIG_NETWORK_SECMARK | 
|---|
| 1522 | CHECK_SKB_FIELD(secmark); | 
|---|
| 1523 | #endif | 
|---|
| 1524 | #ifdef CONFIG_NET_RX_BUSY_POLL | 
|---|
| 1525 | CHECK_SKB_FIELD(napi_id); | 
|---|
| 1526 | #endif | 
|---|
| 1527 | CHECK_SKB_FIELD(alloc_cpu); | 
|---|
| 1528 | #ifdef CONFIG_XPS | 
|---|
| 1529 | CHECK_SKB_FIELD(sender_cpu); | 
|---|
| 1530 | #endif | 
|---|
| 1531 | #ifdef CONFIG_NET_SCHED | 
|---|
| 1532 | CHECK_SKB_FIELD(tc_index); | 
|---|
| 1533 | #endif | 
|---|
| 1534 |  | 
|---|
| 1535 | } | 
|---|
| 1536 |  | 
|---|
| 1537 | /* | 
|---|
| 1538 | * You should not add any new code to this function.  Add it to | 
|---|
| 1539 | * __copy_skb_header above instead. | 
|---|
| 1540 | */ | 
|---|
| 1541 | static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | 
|---|
| 1542 | { | 
|---|
| 1543 | #define C(x) n->x = skb->x | 
|---|
| 1544 |  | 
|---|
| 1545 | n->next = n->prev = NULL; | 
|---|
| 1546 | n->sk = NULL; | 
|---|
| 1547 | __copy_skb_header(n, skb); | 
|---|
| 1548 |  | 
|---|
| 1549 | C(len); | 
|---|
| 1550 | C(data_len); | 
|---|
| 1551 | C(mac_len); | 
|---|
| 1552 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; | 
|---|
| 1553 | n->cloned = 1; | 
|---|
| 1554 | n->nohdr = 0; | 
|---|
| 1555 | n->peeked = 0; | 
|---|
| 1556 | C(pfmemalloc); | 
|---|
| 1557 | C(pp_recycle); | 
|---|
| 1558 | n->destructor = NULL; | 
|---|
| 1559 | C(tail); | 
|---|
| 1560 | C(end); | 
|---|
| 1561 | C(head); | 
|---|
| 1562 | C(head_frag); | 
|---|
| 1563 | C(data); | 
|---|
| 1564 | C(truesize); | 
|---|
| 1565 | refcount_set(&n->users, 1); | 
|---|
| 1566 |  | 
|---|
| 1567 | atomic_inc(&(skb_shinfo(skb)->dataref)); | 
|---|
| 1568 | skb->cloned = 1; | 
|---|
| 1569 |  | 
|---|
| 1570 | return n; | 
|---|
| 1571 | #undef C | 
|---|
| 1572 | } | 
|---|
| 1573 |  | 
|---|
| 1574 | /** | 
|---|
| 1575 | * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg | 
|---|
| 1576 | * @first: first sk_buff of the msg | 
|---|
| 1577 | */ | 
|---|
| 1578 | struct sk_buff *alloc_skb_for_msg(struct sk_buff *first) | 
|---|
| 1579 | { | 
|---|
| 1580 | struct sk_buff *n; | 
|---|
| 1581 |  | 
|---|
| 1582 | n = alloc_skb(0, GFP_ATOMIC); | 
|---|
| 1583 | if (!n) | 
|---|
| 1584 | return NULL; | 
|---|
| 1585 |  | 
|---|
| 1586 | n->len = first->len; | 
|---|
| 1587 | n->data_len = first->len; | 
|---|
| 1588 | n->truesize = first->truesize; | 
|---|
| 1589 |  | 
|---|
| 1590 | skb_shinfo(n)->frag_list = first; | 
|---|
| 1591 |  | 
|---|
| 1592 | __copy_skb_header(n, first); | 
|---|
| 1593 | n->destructor = NULL; | 
|---|
| 1594 |  | 
|---|
| 1595 | return n; | 
|---|
| 1596 | } | 
|---|
| 1597 | EXPORT_SYMBOL_GPL(alloc_skb_for_msg); | 
|---|
| 1598 |  | 
|---|
| 1599 | /** | 
|---|
| 1600 | *	skb_morph	-	morph one skb into another | 
|---|
| 1601 | *	@dst: the skb to receive the contents | 
|---|
| 1602 | *	@src: the skb to supply the contents | 
|---|
| 1603 | * | 
|---|
| 1604 | *	This is identical to skb_clone except that the target skb is | 
|---|
| 1605 | *	supplied by the user. | 
|---|
| 1606 | * | 
|---|
| 1607 | *	The target skb is returned upon exit. | 
|---|
| 1608 | */ | 
|---|
| 1609 | struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) | 
|---|
| 1610 | { | 
|---|
| 1611 | skb_release_all(dst, SKB_CONSUMED); | 
|---|
| 1612 | return __skb_clone(dst, src); | 
|---|
| 1613 | } | 
|---|
| 1614 | EXPORT_SYMBOL_GPL(skb_morph); | 
|---|
| 1615 |  | 
|---|
| 1616 | int mm_account_pinned_pages(struct mmpin *mmp, size_t size) | 
|---|
| 1617 | { | 
|---|
| 1618 | unsigned long max_pg, num_pg, new_pg, old_pg, rlim; | 
|---|
| 1619 | struct user_struct *user; | 
|---|
| 1620 |  | 
|---|
| 1621 | if (capable(CAP_IPC_LOCK) || !size) | 
|---|
| 1622 | return 0; | 
|---|
| 1623 |  | 
|---|
| 1624 | rlim = rlimit(RLIMIT_MEMLOCK); | 
|---|
| 1625 | if (rlim == RLIM_INFINITY) | 
|---|
| 1626 | return 0; | 
|---|
| 1627 |  | 
|---|
| 1628 | num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */ | 
|---|
| 1629 | max_pg = rlim >> PAGE_SHIFT; | 
|---|
| 1630 | user = mmp->user ? : current_user(); | 
|---|
| 1631 |  | 
|---|
| 1632 | old_pg = atomic_long_read(&user->locked_vm); | 
|---|
| 1633 | do { | 
|---|
| 1634 | new_pg = old_pg + num_pg; | 
|---|
| 1635 | if (new_pg > max_pg) | 
|---|
| 1636 | return -ENOBUFS; | 
|---|
| 1637 | } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg)); | 
|---|
| 1638 |  | 
|---|
| 1639 | if (!mmp->user) { | 
|---|
| 1640 | mmp->user = get_uid(user); | 
|---|
| 1641 | mmp->num_pg = num_pg; | 
|---|
| 1642 | } else { | 
|---|
| 1643 | mmp->num_pg += num_pg; | 
|---|
| 1644 | } | 
|---|
| 1645 |  | 
|---|
| 1646 | return 0; | 
|---|
| 1647 | } | 
|---|
| 1648 | EXPORT_SYMBOL_GPL(mm_account_pinned_pages); | 
|---|
| 1649 |  | 
|---|
| 1650 | void mm_unaccount_pinned_pages(struct mmpin *mmp) | 
|---|
| 1651 | { | 
|---|
| 1652 | if (mmp->user) { | 
|---|
| 1653 | atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); | 
|---|
| 1654 | free_uid(mmp->user); | 
|---|
| 1655 | } | 
|---|
| 1656 | } | 
|---|
| 1657 | EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); | 
|---|
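|  | /* Worked example of the accounting above, assuming 4 KiB pages: a | 
|---|
|  | * 100 KiB zerocopy request charges num_pg = (102400 >> 12) + 2 = 27 | 
|---|
|  | * pages against RLIMIT_MEMLOCK; the "+ 2" covers the worst case of a | 
|---|
|  | * range straddling an extra page at each end. The | 
|---|
|  | * atomic_long_try_cmpxchg() loop makes the limit check and the charge | 
|---|
|  | * atomic against concurrent users without taking a lock. | 
|---|
|  | */ | 
|---|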
| 1658 |  | 
|---|
| 1659 | static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size, | 
|---|
| 1660 | bool devmem) | 
|---|
| 1661 | { | 
|---|
| 1662 | struct ubuf_info_msgzc *uarg; | 
|---|
| 1663 | struct sk_buff *skb; | 
|---|
| 1664 |  | 
|---|
| 1665 | WARN_ON_ONCE(!in_task()); | 
|---|
| 1666 |  | 
|---|
| 1667 | skb = sock_omalloc(sk, 0, GFP_KERNEL); | 
|---|
| 1668 | if (!skb) | 
|---|
| 1669 | return NULL; | 
|---|
| 1670 |  | 
|---|
| 1671 | BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); | 
|---|
| 1672 | uarg = (void *)skb->cb; | 
|---|
| 1673 | uarg->mmp.user = NULL; | 
|---|
| 1674 |  | 
|---|
| 1675 | if (likely(!devmem) && mm_account_pinned_pages(&uarg->mmp, size)) { | 
|---|
| 1676 | kfree_skb(skb); | 
|---|
| 1677 | return NULL; | 
|---|
| 1678 | } | 
|---|
| 1679 |  | 
|---|
| 1680 | uarg->ubuf.ops = &msg_zerocopy_ubuf_ops; | 
|---|
| 1681 | uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; | 
|---|
| 1682 | uarg->len = 1; | 
|---|
| 1683 | uarg->bytelen = size; | 
|---|
| 1684 | uarg->zerocopy = 1; | 
|---|
| 1685 | uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; | 
|---|
| 1686 | refcount_set(&uarg->ubuf.refcnt, 1); | 
|---|
| 1687 | sock_hold(sk); | 
|---|
| 1688 |  | 
|---|
| 1689 | return &uarg->ubuf; | 
|---|
| 1690 | } | 
|---|
| 1691 |  | 
|---|
| 1692 | static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg) | 
|---|
| 1693 | { | 
|---|
| 1694 | return container_of((void *)uarg, struct sk_buff, cb); | 
|---|
| 1695 | } | 
|---|
| 1696 |  | 
|---|
| 1697 | struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, | 
|---|
| 1698 | struct ubuf_info *uarg, bool devmem) | 
|---|
| 1699 | { | 
|---|
| 1700 | if (uarg) { | 
|---|
| 1701 | struct ubuf_info_msgzc *uarg_zc; | 
|---|
| 1702 | const u32 byte_limit = 1 << 19;		/* limit to a few TSO */ | 
|---|
| 1703 | u32 bytelen, next; | 
|---|
| 1704 |  | 
|---|
| 1705 | /* there might be non MSG_ZEROCOPY users */ | 
|---|
| 1706 | if (uarg->ops != &msg_zerocopy_ubuf_ops) | 
|---|
| 1707 | return NULL; | 
|---|
| 1708 |  | 
|---|
| 1709 | /* realloc only when socket is locked (TCP, UDP cork), | 
|---|
| 1710 | * so uarg->len and sk_zckey access is serialized | 
|---|
| 1711 | */ | 
|---|
| 1712 | if (!sock_owned_by_user(sk)) { | 
|---|
| 1713 | WARN_ON_ONCE(1); | 
|---|
| 1714 | return NULL; | 
|---|
| 1715 | } | 
|---|
| 1716 |  | 
|---|
| 1717 | uarg_zc = uarg_to_msgzc(uarg); | 
|---|
| 1718 | bytelen = uarg_zc->bytelen + size; | 
|---|
| 1719 | if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) { | 
|---|
| 1720 | /* TCP can create new skb to attach new uarg */ | 
|---|
| 1721 | if (sk->sk_type == SOCK_STREAM) | 
|---|
| 1722 | goto new_alloc; | 
|---|
| 1723 | return NULL; | 
|---|
| 1724 | } | 
|---|
| 1725 |  | 
|---|
| 1726 | next = (u32)atomic_read(&sk->sk_zckey); | 
|---|
| 1727 | if ((u32)(uarg_zc->id + uarg_zc->len) == next) { | 
|---|
| 1728 | if (likely(!devmem) && | 
|---|
| 1729 | mm_account_pinned_pages(&uarg_zc->mmp, size)) | 
|---|
| 1730 | return NULL; | 
|---|
| 1731 | uarg_zc->len++; | 
|---|
| 1732 | uarg_zc->bytelen = bytelen; | 
|---|
| 1733 | atomic_set(&sk->sk_zckey, ++next); | 
|---|
| 1734 |  | 
|---|
| 1735 | /* no extra ref when appending to datagram (MSG_MORE) */ | 
|---|
| 1736 | if (sk->sk_type == SOCK_STREAM) | 
|---|
| 1737 | net_zcopy_get(uarg); | 
|---|
| 1738 |  | 
|---|
| 1739 | return uarg; | 
|---|
| 1740 | } | 
|---|
| 1741 | } | 
|---|
| 1742 |  | 
|---|
| 1743 | new_alloc: | 
|---|
| 1744 | return msg_zerocopy_alloc(sk, size, devmem); | 
|---|
| 1745 | } | 
|---|
| 1746 | EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); | 
|---|
| 1747 |  | 
|---|
| 1748 | static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) | 
|---|
| 1749 | { | 
|---|
| 1750 | struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); | 
|---|
| 1751 | u32 old_lo, old_hi; | 
|---|
| 1752 | u64 sum_len; | 
|---|
| 1753 |  | 
|---|
| 1754 | old_lo = serr->ee.ee_info; | 
|---|
| 1755 | old_hi = serr->ee.ee_data; | 
|---|
| 1756 | sum_len = old_hi - old_lo + 1ULL + len; | 
|---|
| 1757 |  | 
|---|
| 1758 | if (sum_len >= (1ULL << 32)) | 
|---|
| 1759 | return false; | 
|---|
| 1760 |  | 
|---|
| 1761 | if (lo != old_hi + 1) | 
|---|
| 1762 | return false; | 
|---|
| 1763 |  | 
|---|
| 1764 | serr->ee.ee_data += len; | 
|---|
| 1765 | return true; | 
|---|
| 1766 | } | 
|---|
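|  | /* Worked example of the coalescing above: a queued notification | 
|---|
|  | * covering ids [ee_info = 1, ee_data = 5] absorbs a new completion | 
|---|
|  | * with lo = 6, len = 3, extending it in place to [1, 8]. A range | 
|---|
|  | * starting at lo = 7 would leave a hole (7 != 5 + 1) and therefore | 
|---|
|  | * gets its own queue entry. The u64 sum_len guards against the merged | 
|---|
|  | * range growing past 2^32 ids. | 
|---|
|  | */ | 
|---|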
| 1767 |  | 
|---|
| 1768 | static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg) | 
|---|
| 1769 | { | 
|---|
| 1770 | struct sk_buff *tail, *skb = skb_from_uarg(uarg); | 
|---|
| 1771 | struct sock_exterr_skb *serr; | 
|---|
| 1772 | struct sock *sk = skb->sk; | 
|---|
| 1773 | struct sk_buff_head *q; | 
|---|
| 1774 | unsigned long flags; | 
|---|
| 1775 | bool is_zerocopy; | 
|---|
| 1776 | u32 lo, hi; | 
|---|
| 1777 | u16 len; | 
|---|
| 1778 |  | 
|---|
| 1779 | mm_unaccount_pinned_pages(&uarg->mmp); | 
|---|
| 1780 |  | 
|---|
| 1781 | /* if !len, there was only 1 call, and it was aborted | 
|---|
| 1782 | * so do not queue a completion notification | 
|---|
| 1783 | */ | 
|---|
| 1784 | if (!uarg->len || sock_flag(sk, SOCK_DEAD)) | 
|---|
| 1785 | goto release; | 
|---|
| 1786 |  | 
|---|
| 1787 | len = uarg->len; | 
|---|
| 1788 | lo = uarg->id; | 
|---|
| 1789 | hi = uarg->id + len - 1; | 
|---|
| 1790 | is_zerocopy = uarg->zerocopy; | 
|---|
| 1791 |  | 
|---|
| 1792 | serr = SKB_EXT_ERR(skb); | 
|---|
| 1793 | memset(serr, 0, sizeof(*serr)); | 
|---|
| 1794 | serr->ee.ee_errno = 0; | 
|---|
| 1795 | serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; | 
|---|
| 1796 | serr->ee.ee_data = hi; | 
|---|
| 1797 | serr->ee.ee_info = lo; | 
|---|
| 1798 | if (!is_zerocopy) | 
|---|
| 1799 | serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; | 
|---|
| 1800 |  | 
|---|
| 1801 | q = &sk->sk_error_queue; | 
|---|
| 1802 | spin_lock_irqsave(&q->lock, flags); | 
|---|
| 1803 | tail = skb_peek_tail(q); | 
|---|
| 1804 | if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || | 
|---|
| 1805 | !skb_zerocopy_notify_extend(tail, lo, len)) { | 
|---|
| 1806 | __skb_queue_tail(q, skb); | 
|---|
| 1807 | skb = NULL; | 
|---|
| 1808 | } | 
|---|
| 1809 | spin_unlock_irqrestore(&q->lock, flags); | 
|---|
| 1810 |  | 
|---|
| 1811 | sk_error_report(sk); | 
|---|
| 1812 |  | 
|---|
| 1813 | release: | 
|---|
| 1814 | consume_skb(skb); | 
|---|
| 1815 | sock_put(sk); | 
|---|
| 1816 | } | 
|---|
| 1817 |  | 
|---|
| 1818 | static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg, | 
|---|
| 1819 | bool success) | 
|---|
| 1820 | { | 
|---|
| 1821 | struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg); | 
|---|
| 1822 |  | 
|---|
| 1823 | uarg_zc->zerocopy = uarg_zc->zerocopy & success; | 
|---|
| 1824 |  | 
|---|
| 1825 | if (refcount_dec_and_test(&uarg->refcnt)) | 
|---|
| 1826 | __msg_zerocopy_callback(uarg_zc); | 
|---|
| 1827 | } | 
|---|
| 1828 |  | 
|---|
| 1829 | void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) | 
|---|
| 1830 | { | 
|---|
| 1831 | struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk; | 
|---|
| 1832 |  | 
|---|
| 1833 | atomic_dec(&sk->sk_zckey); | 
|---|
| 1834 | uarg_to_msgzc(uarg)->len--; | 
|---|
| 1835 |  | 
|---|
| 1836 | if (have_uref) | 
|---|
| 1837 | msg_zerocopy_complete(NULL, uarg, true); | 
|---|
| 1838 | } | 
|---|
| 1839 | EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); | 
|---|
| 1840 |  | 
|---|
| 1841 | const struct ubuf_info_ops msg_zerocopy_ubuf_ops = { | 
|---|
| 1842 | .complete = msg_zerocopy_complete, | 
|---|
| 1843 | }; | 
|---|
| 1844 | EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops); | 
|---|
| 1845 |  | 
|---|
| 1846 | int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, | 
|---|
| 1847 | struct msghdr *msg, int len, | 
|---|
| 1848 | struct ubuf_info *uarg, | 
|---|
| 1849 | struct net_devmem_dmabuf_binding *binding) | 
|---|
| 1850 | { | 
|---|
| 1851 | int err, orig_len = skb->len; | 
|---|
| 1852 |  | 
|---|
| 1853 | if (uarg->ops->link_skb) { | 
|---|
| 1854 | err = uarg->ops->link_skb(skb, uarg); | 
|---|
| 1855 | if (err) | 
|---|
| 1856 | return err; | 
|---|
| 1857 | } else { | 
|---|
| 1858 | struct ubuf_info *orig_uarg = skb_zcopy(skb); | 
|---|
| 1859 |  | 
|---|
| 1860 | /* An skb can only point to one uarg. This edge case happens | 
|---|
| 1861 | * when TCP appends to an skb, but zerocopy_realloc triggered | 
|---|
| 1862 | * a new alloc. | 
|---|
| 1863 | */ | 
|---|
| 1864 | if (orig_uarg && uarg != orig_uarg) | 
|---|
| 1865 | return -EEXIST; | 
|---|
| 1866 | } | 
|---|
| 1867 |  | 
|---|
| 1868 | err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len, | 
|---|
| 1869 | binding); | 
|---|
| 1870 | if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { | 
|---|
| 1871 | struct sock *save_sk = skb->sk; | 
|---|
| 1872 |  | 
|---|
| 1873 | /* Streams do not free skb on error. Reset to prev state. */ | 
|---|
| 1874 | iov_iter_revert(&msg->msg_iter, skb->len - orig_len); | 
|---|
| 1875 | skb->sk = sk; | 
|---|
| 1876 | ___pskb_trim(skb, orig_len); | 
|---|
| 1877 | skb->sk = save_sk; | 
|---|
| 1878 | return err; | 
|---|
| 1879 | } | 
|---|
| 1880 |  | 
|---|
| 1881 | skb_zcopy_set(skb, uarg, NULL); | 
|---|
| 1882 | return skb->len - orig_len; | 
|---|
| 1883 | } | 
|---|
| 1884 | EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); | 
|---|
| 1885 |  | 
|---|
| 1886 | void __skb_zcopy_downgrade_managed(struct sk_buff *skb) | 
|---|
| 1887 | { | 
|---|
| 1888 | int i; | 
|---|
| 1889 |  | 
|---|
| 1890 | skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS; | 
|---|
| 1891 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | 
|---|
| 1892 | skb_frag_ref(skb, i); | 
|---|
| 1893 | } | 
|---|
| 1894 | EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed); | 
|---|
| 1895 |  | 
|---|
| 1896 | static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, | 
|---|
| 1897 | gfp_t gfp_mask) | 
|---|
| 1898 | { | 
|---|
| 1899 | if (skb_zcopy(orig)) { | 
|---|
| 1900 | if (skb_zcopy(nskb)) { | 
|---|
| 1901 | /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ | 
|---|
| 1902 | if (!gfp_mask) { | 
|---|
| 1903 | WARN_ON_ONCE(1); | 
|---|
| 1904 | return -ENOMEM; | 
|---|
| 1905 | } | 
|---|
| 1906 | if (skb_uarg(nskb) == skb_uarg(orig)) | 
|---|
| 1907 | return 0; | 
|---|
| 1908 | if (skb_copy_ubufs(nskb, GFP_ATOMIC)) | 
|---|
| 1909 | return -EIO; | 
|---|
| 1910 | } | 
|---|
| 1911 | skb_zcopy_set(nskb, skb_uarg(orig), NULL); | 
|---|
| 1912 | } | 
|---|
| 1913 | return 0; | 
|---|
| 1914 | } | 
|---|
| 1915 |  | 
|---|
| 1916 | /** | 
|---|
| 1917 | *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel | 
|---|
| 1918 | *	@skb: the skb to modify | 
|---|
| 1919 | *	@gfp_mask: allocation priority | 
|---|
| 1920 | * | 
|---|
| 1921 | *	This must be called on skb with SKBFL_ZEROCOPY_ENABLE. | 
|---|
| 1922 | *	It will copy all frags into kernel and drop the reference | 
|---|
| 1923 | *	to userspace pages. | 
|---|
| 1924 | * | 
|---|
| 1925 | *	If this function is called from an interrupt gfp_mask() must be | 
|---|
| 1926 | *	%GFP_ATOMIC. | 
|---|
| 1927 | * | 
|---|
| 1928 | *	Returns 0 on success or a negative error code on failure | 
|---|
| 1929 | *	to allocate kernel memory to copy to. | 
|---|
| 1930 | */ | 
|---|
| 1931 | int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) | 
|---|
| 1932 | { | 
|---|
| 1933 | int num_frags = skb_shinfo(skb)->nr_frags; | 
|---|
| 1934 | struct page *page, *head = NULL; | 
|---|
| 1935 | int i, order, psize, new_frags; | 
|---|
| 1936 | u32 d_off; | 
|---|
| 1937 |  | 
|---|
| 1938 | if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) | 
|---|
| 1939 | return -EINVAL; | 
|---|
| 1940 |  | 
|---|
| 1941 | if (!skb_frags_readable(skb)) | 
|---|
| 1942 | return -EFAULT; | 
|---|
| 1943 |  | 
|---|
| 1944 | if (!num_frags) | 
|---|
| 1945 | goto release; | 
|---|
| 1946 |  | 
|---|
| 1947 | /* We might have to allocate high order pages, so compute what minimum | 
|---|
| 1948 | * page order is needed. | 
|---|
| 1949 | */ | 
|---|
| 1950 | order = 0; | 
|---|
| 1951 | while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) | 
|---|
| 1952 | order++; | 
|---|
| 1953 | psize = (PAGE_SIZE << order); | 
|---|
| 1954 |  | 
|---|
| 1955 | new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); | 
|---|
| 1956 | for (i = 0; i < new_frags; i++) { | 
|---|
| 1957 | page = alloc_pages(gfp_mask | __GFP_COMP, order); | 
|---|
| 1958 | if (!page) { | 
|---|
| 1959 | while (head) { | 
|---|
| 1960 | struct page *next = (struct page *)page_private(head); | 
|---|
| 1961 | put_page(head); | 
|---|
| 1962 | head = next; | 
|---|
| 1963 | } | 
|---|
| 1964 | return -ENOMEM; | 
|---|
| 1965 | } | 
|---|
| 1966 | set_page_private(page, (unsigned long)head); | 
|---|
| 1967 | head = page; | 
|---|
| 1968 | } | 
|---|
| 1969 |  | 
|---|
| 1970 | page = head; | 
|---|
| 1971 | d_off = 0; | 
|---|
| 1972 | for (i = 0; i < num_frags; i++) { | 
|---|
| 1973 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; | 
|---|
| 1974 | u32 p_off, p_len, copied; | 
|---|
| 1975 | struct page *p; | 
|---|
| 1976 | u8 *vaddr; | 
|---|
| 1977 |  | 
|---|
| 1978 | skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), | 
|---|
| 1979 | p, p_off, p_len, copied) { | 
|---|
| 1980 | u32 copy, done = 0; | 
|---|
| 1981 | vaddr = kmap_atomic(p); | 
|---|
| 1982 |  | 
|---|
| 1983 | while (done < p_len) { | 
|---|
| 1984 | if (d_off == psize) { | 
|---|
| 1985 | d_off = 0; | 
|---|
| 1986 | page = (struct page *)page_private(page); | 
|---|
| 1987 | } | 
|---|
| 1988 | copy = min_t(u32, psize - d_off, p_len - done); | 
|---|
| 1989 | memcpy(page_address(page) + d_off, | 
|---|
| 1990 | vaddr + p_off + done, copy); | 
|---|
| 1991 | done += copy; | 
|---|
| 1992 | d_off += copy; | 
|---|
| 1993 | } | 
|---|
| 1994 | kunmap_atomic(vaddr); | 
|---|
| 1995 | } | 
|---|
| 1996 | } | 
|---|
| 1997 |  | 
|---|
| 1998 | /* skb frags release userspace buffers */ | 
|---|
| 1999 | for (i = 0; i < num_frags; i++) | 
|---|
| 2000 | skb_frag_unref(skb, i); | 
|---|
| 2001 |  | 
|---|
| 2002 | /* skb frags point to kernel buffers */ | 
|---|
| 2003 | for (i = 0; i < new_frags - 1; i++) { | 
|---|
| 2004 | __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize); | 
|---|
| 2005 | head = (struct page *)page_private(head); | 
|---|
| 2006 | } | 
|---|
| 2007 | __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0, | 
|---|
| 2008 | d_off); | 
|---|
| 2009 | skb_shinfo(skb)->nr_frags = new_frags; | 
|---|
| 2010 |  | 
|---|
| 2011 | release: | 
|---|
| 2012 | skb_zcopy_clear(skb, false); | 
|---|
| 2013 | return 0; | 
|---|
| 2014 | } | 
|---|
| 2015 | EXPORT_SYMBOL_GPL(skb_copy_ubufs); | 
|---|
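|  | /* Worked example of the page-order computation above (4 KiB pages, | 
|---|
|  | * MAX_SKB_FRAGS == 17): with __skb_pagelen() at 120 KiB, order 0 only | 
|---|
|  | * covers 17 * 4 KiB = 68 KiB, so the loop settles on order 1; psize is | 
|---|
|  | * then 8 KiB and new_frags = DIV_ROUND_UP(120 KiB, 8 KiB) = 15, which | 
|---|
|  | * fits. The freshly allocated pages are chained through | 
|---|
|  | * page_private() until they are installed as the skb's new frags. | 
|---|
|  | */ | 
|---|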
| 2016 |  | 
|---|
| 2017 | /** | 
|---|
| 2018 | *	skb_clone	-	duplicate an sk_buff | 
|---|
| 2019 | *	@skb: buffer to clone | 
|---|
| 2020 | *	@gfp_mask: allocation priority | 
|---|
| 2021 | * | 
|---|
| 2022 | *	Duplicate an &sk_buff. The new one is not owned by a socket. Both | 
|---|
| 2023 | *	copies share the same packet data but not structure. The new | 
|---|
| 2024 | *	buffer has a reference count of 1. If the allocation fails the | 
|---|
| 2025 | *	function returns %NULL otherwise the new buffer is returned. | 
|---|
| 2026 | * | 
|---|
| 2027 | *	If this function is called from an interrupt gfp_mask() must be | 
|---|
| 2028 | *	%GFP_ATOMIC. | 
|---|
| 2029 | */ | 
|---|
| 2030 |  | 
|---|
| 2031 | struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) | 
|---|
| 2032 | { | 
|---|
| 2033 | struct sk_buff_fclones *fclones = container_of(skb, | 
|---|
| 2034 | struct sk_buff_fclones, | 
|---|
| 2035 | skb1); | 
|---|
| 2036 | struct sk_buff *n; | 
|---|
| 2037 |  | 
|---|
| 2038 | if (skb_orphan_frags(skb, gfp_mask)) | 
|---|
| 2039 | return NULL; | 
|---|
| 2040 |  | 
|---|
| 2041 | if (skb->fclone == SKB_FCLONE_ORIG && | 
|---|
| 2042 | refcount_read(&fclones->fclone_ref) == 1) { | 
|---|
| 2043 | n = &fclones->skb2; | 
|---|
| 2044 | refcount_set(&fclones->fclone_ref, 2); | 
|---|
| 2045 | n->fclone = SKB_FCLONE_CLONE; | 
|---|
| 2046 | } else { | 
|---|
| 2047 | if (skb_pfmemalloc(skb)) | 
|---|
| 2048 | gfp_mask |= __GFP_MEMALLOC; | 
|---|
| 2049 |  | 
|---|
| 2050 | n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask); | 
|---|
| 2051 | if (!n) | 
|---|
| 2052 | return NULL; | 
|---|
| 2053 |  | 
|---|
| 2054 | n->fclone = SKB_FCLONE_UNAVAILABLE; | 
|---|
| 2055 | } | 
|---|
| 2056 |  | 
|---|
| 2057 | return __skb_clone(n, skb); | 
|---|
| 2058 | } | 
|---|
| 2059 | EXPORT_SYMBOL(skb_clone); | 
|---|
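|  | /* Minimal usage sketch (illustrative, not part of this file): a tap | 
|---|
|  | * that needs the same payload on two paths clones instead of copying, | 
|---|
|  | * since clones share the data area: | 
|---|
|  | * | 
|---|
|  | *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); | 
|---|
|  | * | 
|---|
|  | *	if (clone) | 
|---|
|  | *		netif_rx(clone); | 
|---|
|  | * | 
|---|
|  | * Any later write to the payload must be preceded by skb_unclone() or | 
|---|
|  | * a pskb_copy(), as both skbs still reference the same data. | 
|---|
|  | */ | 
|---|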
| 2060 |  | 
|---|
| 2061 | void skb_headers_offset_update(struct sk_buff *skb, int off) | 
|---|
| 2062 | { | 
|---|
| 2063 | /* Only adjust this if it actually is csum_start rather than csum */ | 
|---|
| 2064 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 
|---|
| 2065 | skb->csum_start += off; | 
|---|
| 2066 | /* {transport,network,mac}_header and tail are relative to skb->head */ | 
|---|
| 2067 | skb->transport_header += off; | 
|---|
| 2068 | skb->network_header   += off; | 
|---|
| 2069 | if (skb_mac_header_was_set(skb)) | 
|---|
| 2070 | skb->mac_header += off; | 
|---|
| 2071 | skb->inner_transport_header += off; | 
|---|
| 2072 | skb->inner_network_header += off; | 
|---|
| 2073 | skb->inner_mac_header += off; | 
|---|
| 2074 | } | 
|---|
| 2075 | EXPORT_SYMBOL(skb_headers_offset_update); | 
|---|
| 2076 |  | 
|---|
| 2077 | void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) | 
|---|
| 2078 | { | 
|---|
| 2079 | __copy_skb_header(new, old); | 
|---|
| 2080 |  | 
|---|
| 2081 | skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; | 
|---|
| 2082 | skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; | 
|---|
| 2083 | skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; | 
|---|
| 2084 | } | 
|---|
| 2085 | EXPORT_SYMBOL(skb_copy_header); | 
|---|
| 2086 |  | 
|---|
| 2087 | static inline int skb_alloc_rx_flag(const struct sk_buff *skb) | 
|---|
| 2088 | { | 
|---|
| 2089 | if (skb_pfmemalloc(skb)) | 
|---|
| 2090 | return SKB_ALLOC_RX; | 
|---|
| 2091 | return 0; | 
|---|
| 2092 | } | 
|---|
| 2093 |  | 
|---|
| 2094 | /** | 
|---|
| 2095 | *	skb_copy	-	create private copy of an sk_buff | 
|---|
| 2096 | *	@skb: buffer to copy | 
|---|
| 2097 | *	@gfp_mask: allocation priority | 
|---|
| 2098 | * | 
|---|
| 2099 | *	Make a copy of both an &sk_buff and its data. This is used when the | 
|---|
| 2100 | *	caller wishes to modify the data and needs a private copy of the | 
|---|
| 2101 | *	data to alter. Returns %NULL on failure or the pointer to the buffer | 
|---|
| 2102 | *	on success. The returned buffer has a reference count of 1. | 
|---|
| 2103 | * | 
|---|
| 2104 | *	As by-product this function converts non-linear &sk_buff to linear | 
|---|
| 2105 | *	one, so that &sk_buff becomes completely private and caller is allowed | 
|---|
| 2106 | *	to modify all the data of returned buffer. This means that this | 
|---|
| 2107 | *	function is not recommended for use in circumstances when only | 
|---|
| 2108 | *	header is going to be modified. Use pskb_copy() instead. | 
|---|
| 2109 | */ | 
|---|
| 2110 |  | 
|---|
| 2111 | struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) | 
|---|
| 2112 | { | 
|---|
| 2113 | struct sk_buff *n; | 
|---|
| 2114 | unsigned int size; | 
|---|
| 2115 | int headerlen; | 
|---|
| 2116 |  | 
|---|
| 2117 | if (!skb_frags_readable(skb)) | 
|---|
| 2118 | return NULL; | 
|---|
| 2119 |  | 
|---|
| 2120 | if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) | 
|---|
| 2121 | return NULL; | 
|---|
| 2122 |  | 
|---|
| 2123 | headerlen = skb_headroom(skb); | 
|---|
| 2124 | size = skb_end_offset(skb) + skb->data_len; | 
|---|
| 2125 | n = __alloc_skb(size, gfp_mask, | 
|---|
| 2126 | skb_alloc_rx_flag(skb), NUMA_NO_NODE); | 
|---|
| 2127 | if (!n) | 
|---|
| 2128 | return NULL; | 
|---|
| 2129 |  | 
|---|
| 2130 | /* Set the data pointer */ | 
|---|
| 2131 | skb_reserve(n, headerlen); | 
|---|
| 2132 | /* Set the tail pointer and length */ | 
|---|
| 2133 | skb_put(n, skb->len); | 
|---|
| 2134 |  | 
|---|
| 2135 | BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); | 
|---|
| 2136 |  | 
|---|
| 2137 | skb_copy_header(n, skb); | 
|---|
| 2138 | return n; | 
|---|
| 2139 | } | 
|---|
| 2140 | EXPORT_SYMBOL(skb_copy); | 
|---|
| 2141 |  | 
|---|
| 2142 | /** | 
|---|
| 2143 | *	__pskb_copy_fclone	-  create copy of an sk_buff with private head. | 
|---|
| 2144 | *	@skb: buffer to copy | 
|---|
| 2145 | *	@headroom: headroom of new skb | 
|---|
| 2146 | *	@gfp_mask: allocation priority | 
|---|
| 2147 | *	@fclone: if true allocate the copy of the skb from the fclone | 
|---|
| 2148 | *	cache instead of the head cache; it is recommended to set this | 
|---|
| 2149 | *	to true for the cases where the copy will likely be cloned | 
|---|
| 2150 | * | 
|---|
| 2151 | *	Make a copy of both an &sk_buff and part of its data, located | 
|---|
| 2152 | *	in header. Fragmented data remain shared. This is used when | 
|---|
| 2153 | *	the caller wishes to modify only header of &sk_buff and needs | 
|---|
| 2154 | *	private copy of the header to alter. Returns %NULL on failure | 
|---|
| 2155 | *	or the pointer to the buffer on success. | 
|---|
| 2156 | *	The returned buffer has a reference count of 1. | 
|---|
| 2157 | */ | 
|---|
| 2158 |  | 
|---|
| 2159 | struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, | 
|---|
| 2160 | gfp_t gfp_mask, bool fclone) | 
|---|
| 2161 | { | 
|---|
| 2162 | unsigned int size = skb_headlen(skb) + headroom; | 
|---|
| 2163 | int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); | 
|---|
| 2164 | struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); | 
|---|
| 2165 |  | 
|---|
| 2166 | if (!n) | 
|---|
| 2167 | goto out; | 
|---|
| 2168 |  | 
|---|
| 2169 | /* Set the data pointer */ | 
|---|
| 2170 | skb_reserve(n, headroom); | 
|---|
| 2171 | /* Set the tail pointer and length */ | 
|---|
| 2172 | skb_put(n, skb_headlen(skb)); | 
|---|
| 2173 | /* Copy the bytes */ | 
|---|
| 2174 | skb_copy_from_linear_data(skb, n->data, n->len); | 
|---|
| 2175 |  | 
|---|
| 2176 | n->truesize += skb->data_len; | 
|---|
| 2177 | n->data_len  = skb->data_len; | 
|---|
| 2178 | n->len	     = skb->len; | 
|---|
| 2179 |  | 
|---|
| 2180 | if (skb_shinfo(skb)->nr_frags) { | 
|---|
| 2181 | int i; | 
|---|
| 2182 |  | 
|---|
| 2183 | if (skb_orphan_frags(skb, gfp_mask) || | 
|---|
| 2184 | skb_zerocopy_clone(n, skb, gfp_mask)) { | 
|---|
| 2185 | kfree_skb(n); | 
|---|
| 2186 | n = NULL; | 
|---|
| 2187 | goto out; | 
|---|
| 2188 | } | 
|---|
| 2189 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
|---|
| 2190 | skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; | 
|---|
| 2191 | skb_frag_ref(skb, i); | 
|---|
| 2192 | } | 
|---|
| 2193 | skb_shinfo(n)->nr_frags = i; | 
|---|
| 2194 | } | 
|---|
| 2195 |  | 
|---|
| 2196 | if (skb_has_frag_list(skb)) { | 
|---|
| 2197 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; | 
|---|
| 2198 | skb_clone_fraglist(n); | 
|---|
| 2199 | } | 
|---|
| 2200 |  | 
|---|
| 2201 | skb_copy_header(n, skb); | 
|---|
| 2202 | out: | 
|---|
| 2203 | return n; | 
|---|
| 2204 | } | 
|---|
| 2205 | EXPORT_SYMBOL(__pskb_copy_fclone); | 
|---|
| 2206 |  | 
|---|
| 2207 | /** | 
|---|
| 2208 | *	pskb_expand_head - reallocate header of &sk_buff | 
|---|
| 2209 | *	@skb: buffer to reallocate | 
|---|
| 2210 | *	@nhead: room to add at head | 
|---|
| 2211 | *	@ntail: room to add at tail | 
|---|
| 2212 | *	@gfp_mask: allocation priority | 
|---|
| 2213 | * | 
|---|
| 2214 | *	Expands (or creates identical copy, if @nhead and @ntail are zero) | 
|---|
| 2215 | *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have | 
|---|
| 2216 | *	reference count of 1. Returns zero on success, or a negative error | 
|---|
| 2217 | *	code if expansion failed. In the latter case, &sk_buff is not changed. | 
|---|
| 2218 | * | 
|---|
| 2219 | *	All the pointers pointing into skb header may change and must be | 
|---|
| 2220 | *	reloaded after call to this function. | 
|---|
| 2221 | */ | 
|---|
| 2222 |  | 
|---|
| 2223 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, | 
|---|
| 2224 | gfp_t gfp_mask) | 
|---|
| 2225 | { | 
|---|
| 2226 | unsigned int osize = skb_end_offset(skb); | 
|---|
| 2227 | unsigned int size = osize + nhead + ntail; | 
|---|
| 2228 | long off; | 
|---|
| 2229 | u8 *data; | 
|---|
| 2230 | int i; | 
|---|
| 2231 |  | 
|---|
| 2232 | BUG_ON(nhead < 0); | 
|---|
| 2233 |  | 
|---|
| 2234 | BUG_ON(skb_shared(skb)); | 
|---|
| 2235 |  | 
|---|
| 2236 | skb_zcopy_downgrade_managed(skb); | 
|---|
| 2237 |  | 
|---|
| 2238 | if (skb_pfmemalloc(skb)) | 
|---|
| 2239 | gfp_mask |= __GFP_MEMALLOC; | 
|---|
| 2240 |  | 
|---|
| 2241 | data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL); | 
|---|
| 2242 | if (!data) | 
|---|
| 2243 | goto nodata; | 
|---|
| 2244 | size = SKB_WITH_OVERHEAD(size); | 
|---|
| 2245 |  | 
|---|
| 2246 | /* Copy only real data... and, alas, header. This should be | 
|---|
| 2247 | * optimized for the cases when header is void. | 
|---|
| 2248 | */ | 
|---|
| 2249 | memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); | 
|---|
| 2250 |  | 
|---|
| 2251 | memcpy((struct skb_shared_info *)(data + size), | 
|---|
| 2252 | skb_shinfo(skb), | 
|---|
| 2253 | offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); | 
|---|
| 2254 |  | 
|---|
| 2255 | /* | 
|---|
| 2256 | * if shinfo is shared we must drop the old head gracefully, but if it | 
|---|
| 2257 | * is not we can just drop the old head and let the existing refcount | 
|---|
| 2258 | * be since all we did is relocate the values | 
|---|
| 2259 | */ | 
|---|
| 2260 | if (skb_cloned(skb)) { | 
|---|
| 2261 | if (skb_orphan_frags(skb, gfp_mask)) | 
|---|
| 2262 | goto nofrags; | 
|---|
| 2263 | if (skb_zcopy(skb)) | 
|---|
| 2264 | refcount_inc(&skb_uarg(skb)->refcnt); | 
|---|
| 2265 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | 
|---|
| 2266 | skb_frag_ref(skb, i); | 
|---|
| 2267 |  | 
|---|
| 2268 | if (skb_has_frag_list(skb)) | 
|---|
| 2269 | skb_clone_fraglist(skb); | 
|---|
| 2270 |  | 
|---|
| 2271 | skb_release_data(skb, SKB_CONSUMED); | 
|---|
| 2272 | } else { | 
|---|
| 2273 | skb_free_head(skb); | 
|---|
| 2274 | } | 
|---|
| 2275 | off = (data + nhead) - skb->head; | 
|---|
| 2276 |  | 
|---|
| 2277 | skb->head     = data; | 
|---|
| 2278 | skb->head_frag = 0; | 
|---|
| 2279 | skb->data    += off; | 
|---|
| 2280 |  | 
|---|
| 2281 | skb_set_end_offset(skb, size); | 
|---|
| 2282 | #ifdef NET_SKBUFF_DATA_USES_OFFSET | 
|---|
| 2283 | off           = nhead; | 
|---|
| 2284 | #endif | 
|---|
| 2285 | skb->tail	      += off; | 
|---|
| 2286 | skb_headers_offset_update(skb, nhead); | 
|---|
| 2287 | skb->cloned   = 0; | 
|---|
| 2288 | skb->hdr_len  = 0; | 
|---|
| 2289 | skb->nohdr    = 0; | 
|---|
| 2290 | atomic_set(&skb_shinfo(skb)->dataref, 1); | 
|---|
| 2291 |  | 
|---|
| 2292 | skb_metadata_clear(skb); | 
|---|
| 2293 |  | 
|---|
| 2294 | /* It is not generally safe to change skb->truesize. | 
|---|
| 2295 | * For the moment, we really care of rx path, or | 
|---|
| 2296 | * when skb is orphaned (not attached to a socket). | 
|---|
| 2297 | */ | 
|---|
| 2298 | if (!skb->sk || skb->destructor == sock_edemux) | 
|---|
| 2299 | skb->truesize += size - osize; | 
|---|
| 2300 |  | 
|---|
| 2301 | return 0; | 
|---|
| 2302 |  | 
|---|
| 2303 | nofrags: | 
|---|
| 2304 | skb_kfree_head(data, size); | 
|---|
| 2305 | nodata: | 
|---|
| 2306 | return -ENOMEM; | 
|---|
| 2307 | } | 
|---|
| 2308 | EXPORT_SYMBOL(pskb_expand_head); | 
|---|
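|  | /* Sketch of the "reload all pointers" rule above (illustrative; | 
|---|
|  | * needed_headroom is a hypothetical value): | 
|---|
|  | * | 
|---|
|  | *	struct iphdr *iph = ip_hdr(skb);	<- cached pointer | 
|---|
|  | * | 
|---|
|  | *	if (pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) | 
|---|
|  | *		goto drop; | 
|---|
|  | *	iph = ip_hdr(skb);			<- head may have moved | 
|---|
|  | * | 
|---|
|  | * skb->head is reallocated on success, so any pointer derived from it | 
|---|
|  | * before the call is stale afterwards. | 
|---|
|  | */ | 
|---|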
| 2309 |  | 
|---|
| 2310 | /* Make private copy of skb with writable head and some headroom */ | 
|---|
| 2311 |  | 
|---|
| 2312 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) | 
|---|
| 2313 | { | 
|---|
| 2314 | struct sk_buff *skb2; | 
|---|
| 2315 | int delta = headroom - skb_headroom(skb); | 
|---|
| 2316 |  | 
|---|
| 2317 | if (delta <= 0) | 
|---|
| 2318 | skb2 = pskb_copy(skb, GFP_ATOMIC); | 
|---|
| 2319 | else { | 
|---|
| 2320 | skb2 = skb_clone(skb, GFP_ATOMIC); | 
|---|
| 2321 | if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, | 
|---|
| 2322 | GFP_ATOMIC)) { | 
|---|
| 2323 | kfree_skb(skb2); | 
|---|
| 2324 | skb2 = NULL; | 
|---|
| 2325 | } | 
|---|
| 2326 | } | 
|---|
| 2327 | return skb2; | 
|---|
| 2328 | } | 
|---|
| 2329 | EXPORT_SYMBOL(skb_realloc_headroom); | 
|---|
| 2330 |  | 
|---|
| 2331 | /* Note: We plan to rework this in linux-6.4 */ | 
|---|
| 2332 | int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) | 
|---|
| 2333 | { | 
|---|
| 2334 | unsigned int saved_end_offset, saved_truesize; | 
|---|
| 2335 | struct skb_shared_info *shinfo; | 
|---|
| 2336 | int res; | 
|---|
| 2337 |  | 
|---|
| 2338 | saved_end_offset = skb_end_offset(skb); | 
|---|
| 2339 | saved_truesize = skb->truesize; | 
|---|
| 2340 |  | 
|---|
| 2341 | res = pskb_expand_head(skb, 0, 0, pri); | 
|---|
| 2342 | if (res) | 
|---|
| 2343 | return res; | 
|---|
| 2344 |  | 
|---|
| 2345 | skb->truesize = saved_truesize; | 
|---|
| 2346 |  | 
|---|
| 2347 | if (likely(skb_end_offset(skb) == saved_end_offset)) | 
|---|
| 2348 | return 0; | 
|---|
| 2349 |  | 
|---|
| 2350 | /* We can not change skb->end if the original or new value | 
|---|
| 2351 | * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). | 
|---|
| 2352 | */ | 
|---|
| 2353 | if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM || | 
|---|
| 2354 | skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) { | 
|---|
| 2355 | /* We think this path should not be taken. | 
|---|
| 2356 | * Add a temporary trace to warn us just in case. | 
|---|
| 2357 | */ | 
|---|
| 2358 | pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n", | 
|---|
| 2359 | saved_end_offset, skb_end_offset(skb)); | 
|---|
| 2360 | WARN_ON_ONCE(1); | 
|---|
| 2361 | return 0; | 
|---|
| 2362 | } | 
|---|
| 2363 |  | 
|---|
| 2364 | shinfo = skb_shinfo(skb); | 
|---|
| 2365 |  | 
|---|
| 2366 | /* We are about to change back skb->end, | 
|---|
| 2367 | * we need to move skb_shinfo() to its new location. | 
|---|
| 2368 | */ | 
|---|
| 2369 | memmove(skb->head + saved_end_offset, | 
|---|
| 2370 | shinfo, | 
|---|
| 2371 | offsetof(struct skb_shared_info, frags[shinfo->nr_frags])); | 
|---|
| 2372 |  | 
|---|
| 2373 | skb_set_end_offset(skb, saved_end_offset); | 
|---|
| 2374 |  | 
|---|
| 2375 | return 0; | 
|---|
| 2376 | } | 
|---|
| 2377 |  | 
|---|
| 2378 | /** | 
|---|
| 2379 | *	skb_expand_head - reallocate header of &sk_buff | 
|---|
| 2380 | *	@skb: buffer to reallocate | 
|---|
| 2381 | *	@headroom: needed headroom | 
|---|
| 2382 | * | 
|---|
| 2383 | *	Unlike skb_realloc_headroom, this one does not allocate a new skb | 
|---|
| 2384 | *	if possible; it copies skb->sk to the new skb as needed | 
|---|
| 2385 | *	and frees the original skb in case of failure. | 
|---|
| 2386 | * | 
|---|
| 2387 | *	It expects an increased headroom and generates a warning otherwise. | 
|---|
| 2388 | */ | 
|---|
| 2389 |  | 
|---|
| 2390 | struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) | 
|---|
| 2391 | { | 
|---|
| 2392 | int delta = headroom - skb_headroom(skb); | 
|---|
| 2393 | int osize = skb_end_offset(skb); | 
|---|
| 2394 | struct sock *sk = skb->sk; | 
|---|
| 2395 |  | 
|---|
| 2396 | if (WARN_ONCE(delta <= 0, | 
|---|
| 2397 | "%s is expecting an increase in the headroom", __func__)) | 
|---|
| 2398 | return skb; | 
|---|
| 2399 |  | 
|---|
| 2400 | delta = SKB_DATA_ALIGN(delta); | 
|---|
| 2401 | /* pskb_expand_head() might crash, if skb is shared. */ | 
|---|
| 2402 | if (skb_shared(skb) || !is_skb_wmem(skb)) { | 
|---|
| 2403 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | 
|---|
| 2404 |  | 
|---|
| 2405 | if (unlikely(!nskb)) | 
|---|
| 2406 | goto fail; | 
|---|
| 2407 |  | 
|---|
| 2408 | if (sk) | 
|---|
| 2409 | skb_set_owner_w(nskb, sk); | 
|---|
| 2410 | consume_skb(skb); | 
|---|
| 2411 | skb = nskb; | 
|---|
| 2412 | } | 
|---|
| 2413 | if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) | 
|---|
| 2414 | goto fail; | 
|---|
| 2415 |  | 
|---|
| 2416 | if (sk && is_skb_wmem(skb)) { | 
|---|
| 2417 | delta = skb_end_offset(skb) - osize; | 
|---|
| 2418 | refcount_add(delta, &sk->sk_wmem_alloc); | 
|---|
| 2419 | skb->truesize += delta; | 
|---|
| 2420 | } | 
|---|
| 2421 | return skb; | 
|---|
| 2422 |  | 
|---|
| 2423 | fail: | 
|---|
| 2424 | kfree_skb(skb); | 
|---|
| 2425 | return NULL; | 
|---|
| 2426 | } | 
|---|
| 2427 | EXPORT_SYMBOL(skb_expand_head); | 
|---|
| 2428 |  | 
|---|
| 2429 | /** | 
|---|
| 2430 | *	skb_copy_expand	-	copy and expand sk_buff | 
|---|
| 2431 | *	@skb: buffer to copy | 
|---|
| 2432 | *	@newheadroom: new free bytes at head | 
|---|
| 2433 | *	@newtailroom: new free bytes at tail | 
|---|
| 2434 | *	@gfp_mask: allocation priority | 
|---|
| 2435 | * | 
|---|
| 2436 | *	Make a copy of both an &sk_buff and its data and while doing so | 
|---|
| 2437 | *	allocate additional space. | 
|---|
| 2438 | * | 
|---|
| 2439 | *	This is used when the caller wishes to modify the data and needs a | 
|---|
| 2440 | *	private copy of the data to alter as well as more space for new fields. | 
|---|
| 2441 | *	Returns %NULL on failure or the pointer to the buffer | 
|---|
| 2442 | *	on success. The returned buffer has a reference count of 1. | 
|---|
| 2443 | * | 
|---|
| 2444 | *	You must pass %GFP_ATOMIC as the allocation priority if this function | 
|---|
| 2445 | *	is called from an interrupt. | 
|---|
| 2446 | */ | 
|---|
| 2447 | struct sk_buff *skb_copy_expand(const struct sk_buff *skb, | 
|---|
| 2448 | int newheadroom, int newtailroom, | 
|---|
| 2449 | gfp_t gfp_mask) | 
|---|
| 2450 | { | 
|---|
| 2451 | /* | 
|---|
| 2452 | *	Allocate the copy buffer | 
|---|
| 2453 | */ | 
|---|
| 2454 | int head_copy_len, head_copy_off; | 
|---|
| 2455 | struct sk_buff *n; | 
|---|
| 2456 | int oldheadroom; | 
|---|
| 2457 |  | 
|---|
| 2458 | if (!skb_frags_readable(skb)) | 
|---|
| 2459 | return NULL; | 
|---|
| 2460 |  | 
|---|
| 2461 | if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) | 
|---|
| 2462 | return NULL; | 
|---|
| 2463 |  | 
|---|
| 2464 | oldheadroom = skb_headroom(skb); | 
|---|
| 2465 | n = __alloc_skb(newheadroom + skb->len + newtailroom, | 
|---|
| 2466 | gfp_mask, skb_alloc_rx_flag(skb), | 
|---|
| 2467 | NUMA_NO_NODE); | 
|---|
| 2468 | if (!n) | 
|---|
| 2469 | return NULL; | 
|---|
| 2470 |  | 
|---|
| 2471 | skb_reserve(n, newheadroom); | 
|---|
| 2472 |  | 
|---|
| 2473 | /* Set the tail pointer and length */ | 
|---|
| 2474 | skb_put(n, skb->len); | 
|---|
| 2475 |  | 
|---|
| 2476 | head_copy_len = oldheadroom; | 
|---|
| 2477 | head_copy_off = 0; | 
|---|
| 2478 | if (newheadroom <= head_copy_len) | 
|---|
| 2479 | head_copy_len = newheadroom; | 
|---|
| 2480 | else | 
|---|
| 2481 | head_copy_off = newheadroom - head_copy_len; | 
|---|
| 2482 |  | 
|---|
| 2483 | /* Copy the linear header and data. */ | 
|---|
| 2484 | BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, | 
|---|
| 2485 | skb->len + head_copy_len)); | 
|---|
| 2486 |  | 
|---|
| 2487 | skb_copy_header(n, skb); | 
|---|
| 2488 |  | 
|---|
| 2489 | skb_headers_offset_update(n, newheadroom - oldheadroom); | 
|---|
| 2490 |  | 
|---|
| 2491 | return n; | 
|---|
| 2492 | } | 
|---|
| 2493 | EXPORT_SYMBOL(skb_copy_expand); | 
|---|
| 2494 |  | 
|---|
| 2495 | /** | 
|---|
| 2496 | *	__skb_pad		-	zero pad the tail of an skb | 
|---|
| 2497 | *	@skb: buffer to pad | 
|---|
| 2498 | *	@pad: space to pad | 
|---|
| 2499 | *	@free_on_error: free buffer on error | 
|---|
| 2500 | * | 
|---|
| 2501 | *	Ensure that a buffer is followed by a padding area that is zero | 
|---|
| 2502 | *	filled. Used by network drivers which may DMA or transfer data | 
|---|
| 2503 | *	beyond the buffer end onto the wire. | 
|---|
| 2504 | * | 
|---|
| 2505 | *	May return error in out of memory cases. The skb is freed on error | 
|---|
| 2506 | *	if @free_on_error is true. | 
|---|
| 2507 | */ | 
|---|
| 2508 |  | 
|---|
| 2509 | int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) | 
|---|
| 2510 | { | 
|---|
| 2511 | int err; | 
|---|
| 2512 | int ntail; | 
|---|
| 2513 |  | 
|---|
| 2514 | /* If the skbuff is non-linear, tailroom is always zero. */ | 
|---|
| 2515 | if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { | 
|---|
| 2516 | memset(skb->data + skb->len, 0, pad); | 
|---|
| 2517 | return 0; | 
|---|
| 2518 | } | 
|---|
| 2519 |  | 
|---|
| 2520 | ntail = skb->data_len + pad - (skb->end - skb->tail); | 
|---|
| 2521 | if (likely(skb_cloned(skb) || ntail > 0)) { | 
|---|
| 2522 | err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); | 
|---|
| 2523 | if (unlikely(err)) | 
|---|
| 2524 | goto free_skb; | 
|---|
| 2525 | } | 
|---|
| 2526 |  | 
|---|
| 2527 | /* FIXME: The use of this function with non-linear skb's really needs | 
|---|
| 2528 | * to be audited. | 
|---|
| 2529 | */ | 
|---|
| 2530 | err = skb_linearize(skb); | 
|---|
| 2531 | if (unlikely(err)) | 
|---|
| 2532 | goto free_skb; | 
|---|
| 2533 |  | 
|---|
| 2534 | memset(skb->data + skb->len, 0, pad); | 
|---|
| 2535 | return 0; | 
|---|
| 2536 |  | 
|---|
| 2537 | free_skb: | 
|---|
| 2538 | if (free_on_error) | 
|---|
| 2539 | kfree_skb(skb); | 
|---|
| 2540 | return err; | 
|---|
| 2541 | } | 
|---|
| 2542 | EXPORT_SYMBOL(__skb_pad); | 
|---|
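|  | /* Typical use (illustrative): zero-padding a runt Ethernet frame | 
|---|
|  | * before DMA, since hardware may transmit bytes beyond skb->len: | 
|---|
|  | * | 
|---|
|  | *	if (skb->len < ETH_ZLEN && | 
|---|
|  | *	    skb_pad(skb, ETH_ZLEN - skb->len)) | 
|---|
|  | *		return NETDEV_TX_OK;	(skb was already freed) | 
|---|
|  | * | 
|---|
|  | * skb_pad() is the free_on_error == true wrapper around __skb_pad(); | 
|---|
|  | * eth_skb_pad() packages the same pattern for Ethernet drivers. | 
|---|
|  | */ | 
|---|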
| 2543 |  | 
|---|
| 2544 | /** | 
|---|
| 2545 | *	pskb_put - add data to the tail of a potentially fragmented buffer | 
|---|
| 2546 | *	@skb: start of the buffer to use | 
|---|
| 2547 | *	@tail: tail fragment of the buffer to use | 
|---|
| 2548 | *	@len: amount of data to add | 
|---|
| 2549 | * | 
|---|
| 2550 | *	This function extends the used data area of the potentially | 
|---|
| 2551 | *	fragmented buffer. @tail must be the last fragment of @skb -- or | 
|---|
| 2552 | *	@skb itself. If this would exceed the total buffer size the kernel | 
|---|
| 2553 | *	will panic. A pointer to the first byte of the extra data is | 
|---|
| 2554 | *	returned. | 
|---|
| 2555 | */ | 
|---|
| 2556 |  | 
|---|
| 2557 | void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) | 
|---|
| 2558 | { | 
|---|
| 2559 | if (tail != skb) { | 
|---|
| 2560 | skb->data_len += len; | 
|---|
| 2561 | skb->len += len; | 
|---|
| 2562 | } | 
|---|
| 2563 | return skb_put(tail, len); | 
|---|
| 2564 | } | 
|---|
| 2565 | EXPORT_SYMBOL_GPL(pskb_put); | 
|---|
| 2566 |  | 
|---|
| 2567 | /** | 
|---|
| 2568 | *	skb_put - add data to a buffer | 
|---|
| 2569 | *	@skb: buffer to use | 
|---|
| 2570 | *	@len: amount of data to add | 
|---|
| 2571 | * | 
|---|
| 2572 | *	This function extends the used data area of the buffer. If this would | 
|---|
| 2573 | *	exceed the total buffer size the kernel will panic. A pointer to the | 
|---|
| 2574 | *	first byte of the extra data is returned. | 
|---|
| 2575 | */ | 
|---|
| 2576 | void *skb_put(struct sk_buff *skb, unsigned int len) | 
|---|
| 2577 | { | 
|---|
| 2578 | void *tmp = skb_tail_pointer(skb); | 
|---|
| 2579 | SKB_LINEAR_ASSERT(skb); | 
|---|
| 2580 | skb->tail += len; | 
|---|
| 2581 | skb->len  += len; | 
|---|
| 2582 | if (unlikely(skb->tail > skb->end)) | 
|---|
| 2583 | skb_over_panic(skb, len, __builtin_return_address(0)); | 
|---|
| 2584 | return tmp; | 
|---|
| 2585 | } | 
|---|
| 2586 | EXPORT_SYMBOL(skb_put); | 
|---|
| 2587 |  | 
|---|
| 2588 | /** | 
|---|
| 2589 | *	skb_push - add data to the start of a buffer | 
|---|
| 2590 | *	@skb: buffer to use | 
|---|
| 2591 | *	@len: amount of data to add | 
|---|
| 2592 | * | 
|---|
| 2593 | *	This function extends the used data area of the buffer at the buffer | 
|---|
| 2594 | *	start. If this would exceed the total buffer headroom the kernel will | 
|---|
| 2595 | *	panic. A pointer to the first byte of the extra data is returned. | 
|---|
| 2596 | */ | 
|---|
| 2597 | void *skb_push(struct sk_buff *skb, unsigned int len) | 
|---|
| 2598 | { | 
|---|
| 2599 | skb->data -= len; | 
|---|
| 2600 | skb->len  += len; | 
|---|
| 2601 | if (unlikely(skb->data < skb->head)) | 
|---|
| 2602 | skb_under_panic(skb, len, __builtin_return_address(0)); |
|---|
| 2603 | return skb->data; | 
|---|
| 2604 | } | 
|---|
| 2605 | EXPORT_SYMBOL(skb_push); | 
|---|
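| | /* A minimal sketch (hypothetical transmit path, assuming ETH_HLEN bytes |
|---|
| | * of headroom were reserved earlier with skb_reserve()): prepend an |
|---|
| | * Ethernet header in front of the current data: |
|---|
| | * |
|---|
| | *	struct ethhdr *eth = skb_push(skb, ETH_HLEN); |
|---|
| | * |
|---|
| | *	memcpy(eth->h_dest, dest_mac, ETH_ALEN); |
|---|
| | */ |
|---|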
| 2606 |  | 
|---|
| 2607 | /** | 
|---|
| 2608 | *	skb_pull - remove data from the start of a buffer | 
|---|
| 2609 | *	@skb: buffer to use | 
|---|
| 2610 | *	@len: amount of data to remove | 
|---|
| 2611 | * | 
|---|
| 2612 | *	This function removes data from the start of a buffer, returning | 
|---|
| 2613 | *	the memory to the headroom. A pointer to the next data in the buffer | 
|---|
| 2614 | *	is returned. Once the data has been pulled future pushes will overwrite | 
|---|
| 2615 | *	the old data. | 
|---|
| 2616 | */ | 
|---|
| 2617 | void *skb_pull(struct sk_buff *skb, unsigned int len) | 
|---|
| 2618 | { | 
|---|
| 2619 | return skb_pull_inline(skb, len); | 
|---|
| 2620 | } | 
|---|
| 2621 | EXPORT_SYMBOL(skb_pull); | 
|---|
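| | /* A minimal sketch (hypothetical receive path): advance past a header |
|---|
| | * that was just parsed. pskb_may_pull() first guarantees the bytes are |
|---|
| | * in the linear area, so the skb_pull() below cannot return NULL: |
|---|
| | * |
|---|
| | *	if (!pskb_may_pull(skb, ETH_HLEN)) |
|---|
| | *		goto drop; |
|---|
| | *	skb_pull(skb, ETH_HLEN); |
|---|
| | */ |
|---|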
| 2622 |  | 
|---|
| 2623 | /** | 
|---|
| 2624 | *	skb_pull_data - remove data from the start of a buffer returning its | 
|---|
| 2625 | *	original position. | 
|---|
| 2626 | *	@skb: buffer to use | 
|---|
| 2627 | *	@len: amount of data to remove | 
|---|
| 2628 | * | 
|---|
| 2629 | *	This function removes data from the start of a buffer, returning | 
|---|
| 2630 | *	the memory to the headroom. A pointer to the original data in the buffer | 
|---|
| 2631 | *	is returned after checking if there is enough data to pull. Once the | 
|---|
| 2632 | *	data has been pulled future pushes will overwrite the old data. | 
|---|
| 2633 | */ | 
|---|
| 2634 | void *skb_pull_data(struct sk_buff *skb, size_t len) | 
|---|
| 2635 | { | 
|---|
| 2636 | void *data = skb->data; | 
|---|
| 2637 |  | 
|---|
| 2638 | if (skb->len < len) | 
|---|
| 2639 | return NULL; | 
|---|
| 2640 |  | 
|---|
| 2641 | skb_pull(skb, len); | 
|---|
| 2642 |  | 
|---|
| 2643 | return data; | 
|---|
| 2644 | } | 
|---|
| 2645 | EXPORT_SYMBOL(skb_pull_data); | 
|---|
| 2646 |  | 
|---|
| 2647 | /** | 
|---|
| 2648 | *	skb_trim - remove end from a buffer | 
|---|
| 2649 | *	@skb: buffer to alter | 
|---|
| 2650 | *	@len: new length | 
|---|
| 2651 | * | 
|---|
| 2652 | *	Cut the length of a buffer down by removing data from the tail. If | 
|---|
| 2653 | *	the buffer is already under the length specified it is not modified. | 
|---|
| 2654 | *	The skb must be linear. | 
|---|
| 2655 | */ | 
|---|
| 2656 | void skb_trim(struct sk_buff *skb, unsigned int len) | 
|---|
| 2657 | { | 
|---|
| 2658 | if (skb->len > len) | 
|---|
| 2659 | __skb_trim(skb, len); | 
|---|
| 2660 | } | 
|---|
| 2661 | EXPORT_SYMBOL(skb_trim); | 
|---|
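| | /* A minimal sketch (hypothetical): chop trailing padding off a linear |
|---|
| | * frame once the true payload length is known; this is a no-op when |
|---|
| | * skb->len is already at or below the target: |
|---|
| | * |
|---|
| | *	skb_trim(skb, payload_len); |
|---|
| | */ |
|---|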
| 2662 |  | 
|---|
| 2663 | /* Trims skb to length len. It can change skb pointers. | 
|---|
| 2664 | */ | 
|---|
| 2665 |  | 
|---|
| 2666 | int ___pskb_trim(struct sk_buff *skb, unsigned int len) | 
|---|
| 2667 | { | 
|---|
| 2668 | struct sk_buff **fragp; | 
|---|
| 2669 | struct sk_buff *frag; | 
|---|
| 2670 | int offset = skb_headlen(skb); | 
|---|
| 2671 | int nfrags = skb_shinfo(skb)->nr_frags; | 
|---|
| 2672 | int i; | 
|---|
| 2673 | int err; | 
|---|
| 2674 |  | 
|---|
| 2675 | if (skb_cloned(skb) && | 
|---|
| 2676 | unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) | 
|---|
| 2677 | return err; | 
|---|
| 2678 |  | 
|---|
| 2679 | i = 0; | 
|---|
| 2680 | if (offset >= len) | 
|---|
| 2681 | goto drop_pages; | 
|---|
| 2682 |  | 
|---|
| 2683 | for (; i < nfrags; i++) { | 
|---|
| 2684 | int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
|---|
| 2685 |  | 
|---|
| 2686 | if (end < len) { | 
|---|
| 2687 | offset = end; | 
|---|
| 2688 | continue; | 
|---|
| 2689 | } | 
|---|
| 2690 |  | 
|---|
| 2691 | skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); |
|---|
| 2692 |  | 
|---|
| 2693 | drop_pages: | 
|---|
| 2694 | skb_shinfo(skb)->nr_frags = i; | 
|---|
| 2695 |  | 
|---|
| 2696 | for (; i < nfrags; i++) | 
|---|
| 2697 | skb_frag_unref(skb, i); |
|---|
| 2698 |  | 
|---|
| 2699 | if (skb_has_frag_list(skb)) | 
|---|
| 2700 | skb_drop_fraglist(skb); | 
|---|
| 2701 | goto done; | 
|---|
| 2702 | } | 
|---|
| 2703 |  | 
|---|
| 2704 | for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); | 
|---|
| 2705 | fragp = &frag->next) { | 
|---|
| 2706 | int end = offset + frag->len; | 
|---|
| 2707 |  | 
|---|
| 2708 | if (skb_shared(frag)) { |
|---|
| 2709 | struct sk_buff *nfrag; | 
|---|
| 2710 |  | 
|---|
| 2711 | nfrag = skb_clone(frag, GFP_ATOMIC); | 
|---|
| 2712 | if (unlikely(!nfrag)) | 
|---|
| 2713 | return -ENOMEM; | 
|---|
| 2714 |  | 
|---|
| 2715 | nfrag->next = frag->next; | 
|---|
| 2716 | consume_skb(frag); | 
|---|
| 2717 | frag = nfrag; | 
|---|
| 2718 | *fragp = frag; | 
|---|
| 2719 | } | 
|---|
| 2720 |  | 
|---|
| 2721 | if (end < len) { | 
|---|
| 2722 | offset = end; | 
|---|
| 2723 | continue; | 
|---|
| 2724 | } | 
|---|
| 2725 |  | 
|---|
| 2726 | if (end > len && | 
|---|
| 2727 | unlikely((err = pskb_trim(frag, len - offset)))) | 
|---|
| 2728 | return err; | 
|---|
| 2729 |  | 
|---|
| 2730 | if (frag->next) | 
|---|
| 2731 | skb_drop_list(&frag->next); |
|---|
| 2732 | break; | 
|---|
| 2733 | } | 
|---|
| 2734 |  | 
|---|
| 2735 | done: | 
|---|
| 2736 | if (len > skb_headlen(skb)) { | 
|---|
| 2737 | skb->data_len -= skb->len - len; | 
|---|
| 2738 | skb->len       = len; | 
|---|
| 2739 | } else { | 
|---|
| 2740 | skb->len       = len; | 
|---|
| 2741 | skb->data_len  = 0; | 
|---|
| 2742 | skb_set_tail_pointer(skb, len); |
|---|
| 2743 | } | 
|---|
| 2744 |  | 
|---|
| 2745 | if (!skb->sk || skb->destructor == sock_edemux) | 
|---|
| 2746 | skb_condense(skb); | 
|---|
| 2747 | return 0; | 
|---|
| 2748 | } | 
|---|
| 2749 | EXPORT_SYMBOL(___pskb_trim); | 
|---|
| 2750 |  | 
|---|
| 2751 | /* Note : use pskb_trim_rcsum() instead of calling this directly | 
|---|
| 2752 | */ | 
|---|
| 2753 | int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) | 
|---|
| 2754 | { | 
|---|
| 2755 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | 
|---|
| 2756 | int delta = skb->len - len; | 
|---|
| 2757 |  | 
|---|
| 2758 | skb->csum = csum_block_sub(skb->csum, |
|---|
| 2759 | skb_checksum(skb, len, delta, 0), |
|---|
| 2760 | len); |
|---|
| 2761 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | 
|---|
| 2762 | int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; | 
|---|
| 2763 | int offset = skb_checksum_start_offset(skb) + skb->csum_offset; | 
|---|
| 2764 |  | 
|---|
| 2765 | if (offset + sizeof(__sum16) > hdlen) | 
|---|
| 2766 | return -EINVAL; | 
|---|
| 2767 | } | 
|---|
| 2768 | return __pskb_trim(skb, len); | 
|---|
| 2769 | } | 
|---|
| 2770 | EXPORT_SYMBOL(pskb_trim_rcsum_slow); | 
|---|
| 2771 |  | 
|---|
| 2772 | /** | 
|---|
| 2773 | *	__pskb_pull_tail - advance tail of skb header | 
|---|
| 2774 | *	@skb: buffer to reallocate | 
|---|
| 2775 | *	@delta: number of bytes to advance tail | 
|---|
| 2776 | * | 
|---|
| 2777 | *	The function makes sense only on a fragmented &sk_buff: |
|---|
| 2778 | *	it expands the header, moving its tail forward and copying necessary |
|---|
| 2779 | *	data from the fragmented part. |
|---|
| 2780 | * | 
|---|
| 2781 | *	&sk_buff MUST have reference count of 1. | 
|---|
| 2782 | * | 
|---|
| 2783 | *	Returns %NULL (and &sk_buff does not change) if pull failed | 
|---|
| 2784 | *	or value of new tail of skb in the case of success. | 
|---|
| 2785 | * | 
|---|
| 2786 | *	All the pointers pointing into skb header may change and must be | 
|---|
| 2787 | *	reloaded after call to this function. | 
|---|
| 2788 | */ | 
|---|
| 2789 |  | 
|---|
| 2790 | /* Moves tail of skb head forward, copying data from fragmented part, | 
|---|
| 2791 | * when it is necessary. | 
|---|
| 2792 | * 1. It may fail due to malloc failure. | 
|---|
| 2793 | * 2. It may change skb pointers. | 
|---|
| 2794 | * | 
|---|
| 2795 | * It is pretty complicated. Luckily, it is called only in exceptional cases. | 
|---|
| 2796 | */ | 
|---|
| 2797 | void *__pskb_pull_tail(struct sk_buff *skb, int delta) | 
|---|
| 2798 | { | 
|---|
| 2799 | /* If the skb does not have enough free space at the tail, get a new one |
|---|
| 2800 | * plus 128 bytes for future expansions. If we have enough |
|---|
| 2801 | * room at the tail, reallocate without expansion only if skb is cloned. |
|---|
| 2802 | */ | 
|---|
| 2803 | int i, k, eat = (skb->tail + delta) - skb->end; | 
|---|
| 2804 |  | 
|---|
| 2805 | if (!skb_frags_readable(skb)) | 
|---|
| 2806 | return NULL; | 
|---|
| 2807 |  | 
|---|
| 2808 | if (eat > 0 || skb_cloned(skb)) { | 
|---|
| 2809 | if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, | 
|---|
| 2810 | GFP_ATOMIC)) | 
|---|
| 2811 | return NULL; | 
|---|
| 2812 | } | 
|---|
| 2813 |  | 
|---|
| 2814 | BUG_ON(skb_copy_bits(skb, skb_headlen(skb), | 
|---|
| 2815 | skb_tail_pointer(skb), delta)); | 
|---|
| 2816 |  | 
|---|
| 2817 | /* Optimization: no fragments, no reason to pre-estimate |
|---|
| 2818 | * size of pulled pages. Superb. | 
|---|
| 2819 | */ | 
|---|
| 2820 | if (!skb_has_frag_list(skb)) | 
|---|
| 2821 | goto pull_pages; | 
|---|
| 2822 |  | 
|---|
| 2823 | /* Estimate size of pulled pages. */ | 
|---|
| 2824 | eat = delta; | 
|---|
| 2825 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
|---|
| 2826 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
|---|
| 2827 |  | 
|---|
| 2828 | if (size >= eat) | 
|---|
| 2829 | goto pull_pages; | 
|---|
| 2830 | eat -= size; | 
|---|
| 2831 | } | 
|---|
| 2832 |  | 
|---|
| 2833 | /* If we need to update the frag list, we are in trouble. |
|---|
| 2834 | * Certainly, it is possible to add an offset to the skb data, |
|---|
| 2835 | * but taking into account that pulling is expected to |
|---|
| 2836 | * be a very rare operation, it is worth fighting against |
|---|
| 2837 | * further bloating of the skb head and crucifying ourselves here instead. |
|---|
| 2838 | * Pure masochism, indeed. 8)8) |
|---|
| 2839 | */ | 
|---|
| 2840 | if (eat) { | 
|---|
| 2841 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 
|---|
| 2842 | struct sk_buff *clone = NULL; | 
|---|
| 2843 | struct sk_buff *insp = NULL; | 
|---|
| 2844 |  | 
|---|
| 2845 | do { | 
|---|
| 2846 | if (list->len <= eat) { | 
|---|
| 2847 | /* Eaten as whole. */ | 
|---|
| 2848 | eat -= list->len; | 
|---|
| 2849 | list = list->next; | 
|---|
| 2850 | insp = list; | 
|---|
| 2851 | } else { | 
|---|
| 2852 | /* Eaten partially. */ | 
|---|
| 2853 | if (skb_is_gso(skb) && !list->head_frag && | 
|---|
| 2854 | skb_headlen(list)) |
|---|
| 2855 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; |
|---|
| 2856 |  |
|---|
| 2857 | if (skb_shared(list)) { |
|---|
| 2858 | /* Sucks! We need to fork the list. :-( */ |
|---|
| 2859 | clone = skb_clone(list, GFP_ATOMIC); | 
|---|
| 2860 | if (!clone) | 
|---|
| 2861 | return NULL; | 
|---|
| 2862 | insp = list->next; | 
|---|
| 2863 | list = clone; | 
|---|
| 2864 | } else { | 
|---|
| 2865 | /* This may be pulled without | 
|---|
| 2866 | * problems. */ | 
|---|
| 2867 | insp = list; | 
|---|
| 2868 | } | 
|---|
| 2869 | if (!pskb_pull(list, eat)) { |
|---|
| 2870 | kfree_skb(clone); |
|---|
| 2871 | return NULL; | 
|---|
| 2872 | } | 
|---|
| 2873 | break; | 
|---|
| 2874 | } | 
|---|
| 2875 | } while (eat); | 
|---|
| 2876 |  | 
|---|
| 2877 | /* Free pulled out fragments. */ | 
|---|
| 2878 | while ((list = skb_shinfo(skb)->frag_list) != insp) { | 
|---|
| 2879 | skb_shinfo(skb)->frag_list = list->next; | 
|---|
| 2880 | consume_skb(list); | 
|---|
| 2881 | } | 
|---|
| 2882 | /* And insert new clone at head. */ | 
|---|
| 2883 | if (clone) { | 
|---|
| 2884 | clone->next = list; | 
|---|
| 2885 | skb_shinfo(skb)->frag_list = clone; | 
|---|
| 2886 | } | 
|---|
| 2887 | } | 
|---|
| 2888 | /* Success! Now we may commit changes to skb data. */ | 
|---|
| 2889 |  | 
|---|
| 2890 | pull_pages: | 
|---|
| 2891 | eat = delta; | 
|---|
| 2892 | k = 0; | 
|---|
| 2893 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
|---|
| 2894 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
|---|
| 2895 |  | 
|---|
| 2896 | if (size <= eat) { | 
|---|
| 2897 | skb_frag_unref(skb, i); |
|---|
| 2898 | eat -= size; | 
|---|
| 2899 | } else { | 
|---|
| 2900 | skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; | 
|---|
| 2901 |  | 
|---|
| 2902 | *frag = skb_shinfo(skb)->frags[i]; | 
|---|
| 2903 | if (eat) { | 
|---|
| 2904 | skb_frag_off_add(frag, eat); |
|---|
| 2905 | skb_frag_size_sub(frag, eat); |
|---|
| 2906 | if (!i) | 
|---|
| 2907 | goto end; | 
|---|
| 2908 | eat = 0; | 
|---|
| 2909 | } | 
|---|
| 2910 | k++; | 
|---|
| 2911 | } | 
|---|
| 2912 | } | 
|---|
| 2913 | skb_shinfo(skb)->nr_frags = k; | 
|---|
| 2914 |  | 
|---|
| 2915 | end: | 
|---|
| 2916 | skb->tail     += delta; | 
|---|
| 2917 | skb->data_len -= delta; | 
|---|
| 2918 |  | 
|---|
| 2919 | if (!skb->data_len) | 
|---|
| 2920 | skb_zcopy_clear(skb, false); |
|---|
| 2921 |  | 
|---|
| 2922 | return skb_tail_pointer(skb); | 
|---|
| 2923 | } | 
|---|
| 2924 | EXPORT_SYMBOL(__pskb_pull_tail); | 
|---|
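| | /* Callers normally reach this through pskb_may_pull(), which only falls |
|---|
| | * back to __pskb_pull_tail() when the requested bytes are not already in |
|---|
| | * the linear area. A typical (hypothetical) header-parsing sketch: |
|---|
| | * |
|---|
| | *	if (!pskb_may_pull(skb, sizeof(struct iphdr))) |
|---|
| | *		goto drop; |
|---|
| | *	iph = ip_hdr(skb); |
|---|
| | */ |
|---|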
| 2925 |  | 
|---|
| 2926 | /** | 
|---|
| 2927 | *	skb_copy_bits - copy bits from skb to kernel buffer | 
|---|
| 2928 | *	@skb: source skb | 
|---|
| 2929 | *	@offset: offset in source | 
|---|
| 2930 | *	@to: destination buffer | 
|---|
| 2931 | *	@len: number of bytes to copy | 
|---|
| 2932 | * | 
|---|
| 2933 | *	Copy the specified number of bytes from the source skb to the | 
|---|
| 2934 | *	destination buffer. | 
|---|
| 2935 | * | 
|---|
| 2936 | *	CAUTION ! : | 
|---|
| 2937 | *		If its prototype is ever changed, | 
|---|
| 2938 | *		check arch/{*}/net/{*}.S files, | 
|---|
| 2939 | *		since it is called from BPF assembly code. | 
|---|
| 2940 | */ | 
|---|
| 2941 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) | 
|---|
| 2942 | { | 
|---|
| 2943 | int start = skb_headlen(skb); | 
|---|
| 2944 | struct sk_buff *frag_iter; | 
|---|
| 2945 | int i, copy; | 
|---|
| 2946 |  | 
|---|
| 2947 | if (offset > (int)skb->len - len) | 
|---|
| 2948 | goto fault; | 
|---|
| 2949 |  | 
|---|
| 2950 | /* Copy header. */ | 
|---|
| 2951 | if ((copy = start - offset) > 0) { | 
|---|
| 2952 | if (copy > len) | 
|---|
| 2953 | copy = len; | 
|---|
| 2954 | skb_copy_from_linear_data_offset(skb, offset, to, copy); |
|---|
| 2955 | if ((len -= copy) == 0) | 
|---|
| 2956 | return 0; | 
|---|
| 2957 | offset += copy; | 
|---|
| 2958 | to     += copy; | 
|---|
| 2959 | } | 
|---|
| 2960 |  | 
|---|
| 2961 | if (!skb_frags_readable(skb)) | 
|---|
| 2962 | goto fault; | 
|---|
| 2963 |  | 
|---|
| 2964 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
|---|
| 2965 | int end; | 
|---|
| 2966 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; | 
|---|
| 2967 |  | 
|---|
| 2968 | WARN_ON(start > offset + len); | 
|---|
| 2969 |  | 
|---|
| 2970 | end = start + skb_frag_size(f); |
|---|
| 2971 | if ((copy = end - offset) > 0) { | 
|---|
| 2972 | u32 p_off, p_len, copied; | 
|---|
| 2973 | struct page *p; | 
|---|
| 2974 | u8 *vaddr; | 
|---|
| 2975 |  | 
|---|
| 2976 | if (copy > len) | 
|---|
| 2977 | copy = len; | 
|---|
| 2978 |  | 
|---|
| 2979 | skb_frag_foreach_page(f, | 
|---|
| 2980 | skb_frag_off(f) + offset - start, | 
|---|
| 2981 | copy, p, p_off, p_len, copied) { | 
|---|
| 2982 | vaddr = kmap_atomic(p); |
|---|
| 2983 | memcpy(to + copied, vaddr + p_off, p_len); |
|---|
| 2984 | kunmap_atomic(vaddr); | 
|---|
| 2985 | } | 
|---|
| 2986 |  | 
|---|
| 2987 | if ((len -= copy) == 0) | 
|---|
| 2988 | return 0; | 
|---|
| 2989 | offset += copy; | 
|---|
| 2990 | to     += copy; | 
|---|
| 2991 | } | 
|---|
| 2992 | start = end; | 
|---|
| 2993 | } | 
|---|
| 2994 |  | 
|---|
| 2995 | skb_walk_frags(skb, frag_iter) { | 
|---|
| 2996 | int end; | 
|---|
| 2997 |  | 
|---|
| 2998 | WARN_ON(start > offset + len); | 
|---|
| 2999 |  | 
|---|
| 3000 | end = start + frag_iter->len; | 
|---|
| 3001 | if ((copy = end - offset) > 0) { | 
|---|
| 3002 | if (copy > len) | 
|---|
| 3003 | copy = len; | 
|---|
| 3004 | if (skb_copy_bits(frag_iter, offset - start, to, copy)) |
|---|
| 3005 | goto fault; | 
|---|
| 3006 | if ((len -= copy) == 0) | 
|---|
| 3007 | return 0; | 
|---|
| 3008 | offset += copy; | 
|---|
| 3009 | to     += copy; | 
|---|
| 3010 | } | 
|---|
| 3011 | start = end; | 
|---|
| 3012 | } | 
|---|
| 3013 |  | 
|---|
| 3014 | if (!len) | 
|---|
| 3015 | return 0; | 
|---|
| 3016 |  | 
|---|
| 3017 | fault: | 
|---|
| 3018 | return -EFAULT; | 
|---|
| 3019 | } | 
|---|
| 3020 | EXPORT_SYMBOL(skb_copy_bits); | 
|---|
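| | /* A minimal sketch (hypothetical): peek at the first bytes of a possibly |
|---|
| | * non-linear packet without modifying it, by copying them into a stack |
|---|
| | * buffer (inspect_header() is illustrative): |
|---|
| | * |
|---|
| | *	u8 hdr[64]; |
|---|
| | * |
|---|
| | *	if (!skb_copy_bits(skb, 0, hdr, min_t(int, skb->len, sizeof(hdr)))) |
|---|
| | *		inspect_header(hdr); |
|---|
| | */ |
|---|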
| 3021 |  | 
|---|
| 3022 | /* | 
|---|
| 3023 | * Callback from splice_to_pipe(), if we need to release some pages | 
|---|
| 3024 | * at the end of the spd in case we errored out while filling the pipe. |
|---|
| 3025 | */ | 
|---|
| 3026 | static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) | 
|---|
| 3027 | { | 
|---|
| 3028 | put_page(spd->pages[i]); |
|---|
| 3029 | } | 
|---|
| 3030 |  | 
|---|
| 3031 | static struct page *linear_to_page(struct page *page, unsigned int *len, | 
|---|
| 3032 | unsigned int *offset, | 
|---|
| 3033 | struct sock *sk) | 
|---|
| 3034 | { | 
|---|
| 3035 | struct page_frag *pfrag = sk_page_frag(sk); | 
|---|
| 3036 |  | 
|---|
| 3037 | if (!sk_page_frag_refill(sk, pfrag)) | 
|---|
| 3038 | return NULL; | 
|---|
| 3039 |  | 
|---|
| 3040 | *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); | 
|---|
| 3041 |  | 
|---|
| 3042 | memcpy(page_address(pfrag->page) + pfrag->offset, | 
|---|
| 3043 | page_address(page) + *offset, *len); |
|---|
| 3044 | *offset = pfrag->offset; | 
|---|
| 3045 | pfrag->offset += *len; | 
|---|
| 3046 |  | 
|---|
| 3047 | return pfrag->page; | 
|---|
| 3048 | } | 
|---|
| 3049 |  | 
|---|
| 3050 | static bool spd_can_coalesce(const struct splice_pipe_desc *spd, | 
|---|
| 3051 | struct page *page, | 
|---|
| 3052 | unsigned int offset) | 
|---|
| 3053 | { | 
|---|
| 3054 | return	spd->nr_pages && | 
|---|
| 3055 | spd->pages[spd->nr_pages - 1] == page && | 
|---|
| 3056 | (spd->partial[spd->nr_pages - 1].offset + | 
|---|
| 3057 | spd->partial[spd->nr_pages - 1].len == offset); | 
|---|
| 3058 | } | 
|---|
| 3059 |  | 
|---|
| 3060 | /* | 
|---|
| 3061 | * Fill page/offset/length into spd, if it can hold more pages. | 
|---|
| 3062 | */ | 
|---|
| 3063 | static bool spd_fill_page(struct splice_pipe_desc *spd, struct page *page, | 
|---|
| 3064 | unsigned int *len, unsigned int offset, bool linear, | 
|---|
| 3065 | struct sock *sk) | 
|---|
| 3066 | { | 
|---|
| 3067 | if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) | 
|---|
| 3068 | return true; | 
|---|
| 3069 |  | 
|---|
| 3070 | if (linear) { | 
|---|
| 3071 | page = linear_to_page(page, len, &offset, sk); |
|---|
| 3072 | if (!page) | 
|---|
| 3073 | return true; | 
|---|
| 3074 | } | 
|---|
| 3075 | if (spd_can_coalesce(spd, page, offset)) { | 
|---|
| 3076 | spd->partial[spd->nr_pages - 1].len += *len; | 
|---|
| 3077 | return false; | 
|---|
| 3078 | } | 
|---|
| 3079 | get_page(page); | 
|---|
| 3080 | spd->pages[spd->nr_pages] = page; | 
|---|
| 3081 | spd->partial[spd->nr_pages].len = *len; | 
|---|
| 3082 | spd->partial[spd->nr_pages].offset = offset; | 
|---|
| 3083 | spd->nr_pages++; | 
|---|
| 3084 |  | 
|---|
| 3085 | return false; | 
|---|
| 3086 | } | 
|---|
| 3087 |  | 
|---|
| 3088 | static bool __splice_segment(struct page *page, unsigned int poff, | 
|---|
| 3089 | unsigned int plen, unsigned int *off, | 
|---|
| 3090 | unsigned int *len, | 
|---|
| 3091 | struct splice_pipe_desc *spd, bool linear, | 
|---|
| 3092 | struct sock *sk) | 
|---|
| 3093 | { | 
|---|
| 3094 | if (!*len) | 
|---|
| 3095 | return true; | 
|---|
| 3096 |  | 
|---|
| 3097 | /* skip this segment if already processed */ | 
|---|
| 3098 | if (*off >= plen) { | 
|---|
| 3099 | *off -= plen; | 
|---|
| 3100 | return false; | 
|---|
| 3101 | } | 
|---|
| 3102 |  | 
|---|
| 3103 | /* ignore any bits we already processed */ | 
|---|
| 3104 | poff += *off; | 
|---|
| 3105 | plen -= *off; | 
|---|
| 3106 | *off = 0; | 
|---|
| 3107 |  | 
|---|
| 3108 | do { | 
|---|
| 3109 | unsigned int flen = min(*len, plen); | 
|---|
| 3110 |  | 
|---|
| 3111 | if (spd_fill_page(spd, page, &flen, poff, linear, sk)) |
|---|
| 3112 | return true; | 
|---|
| 3113 | poff += flen; | 
|---|
| 3114 | plen -= flen; | 
|---|
| 3115 | *len -= flen; | 
|---|
| 3116 | if (!*len) | 
|---|
| 3117 | return true; | 
|---|
| 3118 | } while (plen); | 
|---|
| 3119 |  | 
|---|
| 3120 | return false; | 
|---|
| 3121 | } | 
|---|
| 3122 |  | 
|---|
| 3123 | /* | 
|---|
| 3124 | * Map linear and fragment data from the skb to spd. It reports true if the | 
|---|
| 3125 | * pipe is full or if we already spliced the requested length. | 
|---|
| 3126 | */ | 
|---|
| 3127 | static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, | 
|---|
| 3128 | unsigned int *offset, unsigned int *len, | 
|---|
| 3129 | struct splice_pipe_desc *spd, struct sock *sk) | 
|---|
| 3130 | { | 
|---|
| 3131 | struct sk_buff *iter; | 
|---|
| 3132 | int seg; | 
|---|
| 3133 |  | 
|---|
| 3134 | /* map the linear part: |
|---|
| 3135 | * If skb->head_frag is set, this 'linear' part is backed by a | 
|---|
| 3136 | * fragment, and if the head is not shared with any clones then | 
|---|
| 3137 | * we can avoid a copy since we own the head portion of this page. | 
|---|
| 3138 | */ | 
|---|
| 3139 | if (__splice_segment(virt_to_page(skb->data), | 
|---|
| 3140 | (unsigned long) skb->data & (PAGE_SIZE - 1), |
|---|
| 3141 | skb_headlen(skb), |
|---|
| 3142 | offset, len, spd, |
|---|
| 3143 | skb_head_is_locked(skb), |
|---|
| 3144 | sk)) | 
|---|
| 3145 | return true; | 
|---|
| 3146 |  | 
|---|
| 3147 | /* | 
|---|
| 3148 | * then map the fragments | 
|---|
| 3149 | */ | 
|---|
| 3150 | if (!skb_frags_readable(skb)) | 
|---|
| 3151 | return false; | 
|---|
| 3152 |  | 
|---|
| 3153 | for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { | 
|---|
| 3154 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; | 
|---|
| 3155 |  | 
|---|
| 3156 | if (WARN_ON_ONCE(!skb_frag_page(f))) | 
|---|
| 3157 | return false; | 
|---|
| 3158 |  | 
|---|
| 3159 | if (__splice_segment(skb_frag_page(f), |
|---|
| 3160 | skb_frag_off(f), skb_frag_size(f), |
|---|
| 3161 | offset, len, spd, false, sk)) |
|---|
| 3162 | return true; | 
|---|
| 3163 | } | 
|---|
| 3164 |  | 
|---|
| 3165 | skb_walk_frags(skb, iter) { | 
|---|
| 3166 | if (*offset >= iter->len) { | 
|---|
| 3167 | *offset -= iter->len; | 
|---|
| 3168 | continue; | 
|---|
| 3169 | } | 
|---|
| 3170 | /* __skb_splice_bits() only fails if the output has no room | 
|---|
| 3171 | * left, so no point in going over the frag_list for the error | 
|---|
| 3172 | * case. | 
|---|
| 3173 | */ | 
|---|
| 3174 | if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) |
|---|
| 3175 | return true; | 
|---|
| 3176 | } | 
|---|
| 3177 |  | 
|---|
| 3178 | return false; | 
|---|
| 3179 | } | 
|---|
| 3180 |  | 
|---|
| 3181 | /* | 
|---|
| 3182 | * Map data from the skb to a pipe. Should handle both the linear part, | 
|---|
| 3183 | * the fragments, and the frag list. | 
|---|
| 3184 | */ | 
|---|
| 3185 | int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, | 
|---|
| 3186 | struct pipe_inode_info *pipe, unsigned int tlen, | 
|---|
| 3187 | unsigned int flags) | 
|---|
| 3188 | { | 
|---|
| 3189 | struct partial_page partial[MAX_SKB_FRAGS]; | 
|---|
| 3190 | struct page *pages[MAX_SKB_FRAGS]; | 
|---|
| 3191 | struct splice_pipe_desc spd = { | 
|---|
| 3192 | .pages = pages, | 
|---|
| 3193 | .partial = partial, | 
|---|
| 3194 | .nr_pages_max = MAX_SKB_FRAGS, | 
|---|
| 3195 | .ops = &nosteal_pipe_buf_ops, | 
|---|
| 3196 | .spd_release = sock_spd_release, | 
|---|
| 3197 | }; | 
|---|
| 3198 | int ret = 0; | 
|---|
| 3199 |  | 
|---|
| 3200 | __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); |
|---|
| 3201 |  | 
|---|
| 3202 | if (spd.nr_pages) | 
|---|
| 3203 | ret = splice_to_pipe(pipe, &spd); |
|---|
| 3204 |  | 
|---|
| 3205 | return ret; | 
|---|
| 3206 | } | 
|---|
| 3207 | EXPORT_SYMBOL_GPL(skb_splice_bits); | 
|---|
| 3208 |  | 
|---|
| 3209 | static int sendmsg_locked(struct sock *sk, struct msghdr *msg) | 
|---|
| 3210 | { | 
|---|
| 3211 | struct socket *sock = sk->sk_socket; | 
|---|
| 3212 | size_t size = msg_data_left(msg); | 
|---|
| 3213 |  | 
|---|
| 3214 | if (!sock) | 
|---|
| 3215 | return -EINVAL; | 
|---|
| 3216 |  | 
|---|
| 3217 | if (!sock->ops->sendmsg_locked) | 
|---|
| 3218 | return sock_no_sendmsg_locked(sk, msg, size); |
|---|
| 3219 |  | 
|---|
| 3220 | return sock->ops->sendmsg_locked(sk, msg, size); | 
|---|
| 3221 | } | 
|---|
| 3222 |  | 
|---|
| 3223 | static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) | 
|---|
| 3224 | { | 
|---|
| 3225 | struct socket *sock = sk->sk_socket; | 
|---|
| 3226 |  | 
|---|
| 3227 | if (!sock) | 
|---|
| 3228 | return -EINVAL; | 
|---|
| 3229 | return sock_sendmsg(sock, msg); | 
|---|
| 3230 | } | 
|---|
| 3231 |  | 
|---|
| 3232 | typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); | 
|---|
| 3233 | static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, | 
|---|
| 3234 | int len, sendmsg_func sendmsg, int flags) | 
|---|
| 3235 | { | 
|---|
| 3236 | int more_hint = sk_is_tcp(sk) ? MSG_MORE : 0; | 
|---|
| 3237 | unsigned int orig_len = len; | 
|---|
| 3238 | struct sk_buff *head = skb; | 
|---|
| 3239 | unsigned short fragidx; | 
|---|
| 3240 | int slen, ret; | 
|---|
| 3241 |  | 
|---|
| 3242 | do_frag_list: | 
|---|
| 3243 |  | 
|---|
| 3244 | /* Deal with head data */ | 
|---|
| 3245 | while (offset < skb_headlen(skb) && len) { | 
|---|
| 3246 | struct kvec kv; | 
|---|
| 3247 | struct msghdr msg; | 
|---|
| 3248 |  | 
|---|
| 3249 | slen = min_t(int, len, skb_headlen(skb) - offset); | 
|---|
| 3250 | kv.iov_base = skb->data + offset; | 
|---|
| 3251 | kv.iov_len = slen; | 
|---|
| 3252 | memset(&msg, 0, sizeof(msg)); |
|---|
| 3253 | msg.msg_flags = MSG_DONTWAIT | flags; | 
|---|
| 3254 | if (slen < len) | 
|---|
| 3255 | msg.msg_flags |= more_hint; | 
|---|
| 3256 |  | 
|---|
| 3257 | iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); |
|---|
| 3258 | ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, | 
|---|
| 3259 | sendmsg_unlocked, sk, &msg); | 
|---|
| 3260 | if (ret <= 0) | 
|---|
| 3261 | goto error; | 
|---|
| 3262 |  | 
|---|
| 3263 | offset += ret; | 
|---|
| 3264 | len -= ret; | 
|---|
| 3265 | } | 
|---|
| 3266 |  | 
|---|
| 3267 | /* All the data was skb head? */ | 
|---|
| 3268 | if (!len) | 
|---|
| 3269 | goto out; | 
|---|
| 3270 |  | 
|---|
| 3271 | /* Make offset relative to start of frags */ | 
|---|
| 3272 | offset -= skb_headlen(skb); | 
|---|
| 3273 |  | 
|---|
| 3274 | /* Find where we are in frag list */ | 
|---|
| 3275 | for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { | 
|---|
| 3276 | skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx]; | 
|---|
| 3277 |  | 
|---|
| 3278 | if (offset < skb_frag_size(frag)) | 
|---|
| 3279 | break; | 
|---|
| 3280 |  | 
|---|
| 3281 | offset -= skb_frag_size(frag); | 
|---|
| 3282 | } | 
|---|
| 3283 |  | 
|---|
| 3284 | for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { | 
|---|
| 3285 | skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx]; | 
|---|
| 3286 |  | 
|---|
| 3287 | slen = min_t(size_t, len, skb_frag_size(frag) - offset); | 
|---|
| 3288 |  | 
|---|
| 3289 | while (slen) { | 
|---|
| 3290 | struct bio_vec bvec; | 
|---|
| 3291 | struct msghdr msg = { | 
|---|
| 3292 | .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | | 
|---|
| 3293 | flags, | 
|---|
| 3294 | }; | 
|---|
| 3295 |  | 
|---|
| 3296 | if (slen < len) | 
|---|
| 3297 | msg.msg_flags |= more_hint; | 
|---|
| 3298 | bvec_set_page(&bvec, skb_frag_page(frag), slen, |
|---|
| 3299 | skb_frag_off(frag) + offset); |
|---|
| 3300 | iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, |
|---|
| 3301 | slen); |
|---|
| 3302 |  | 
|---|
| 3303 | ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, | 
|---|
| 3304 | sendmsg_unlocked, sk, &msg); | 
|---|
| 3305 | if (ret <= 0) | 
|---|
| 3306 | goto error; | 
|---|
| 3307 |  | 
|---|
| 3308 | len -= ret; | 
|---|
| 3309 | offset += ret; | 
|---|
| 3310 | slen -= ret; | 
|---|
| 3311 | } | 
|---|
| 3312 |  | 
|---|
| 3313 | offset = 0; | 
|---|
| 3314 | } | 
|---|
| 3315 |  | 
|---|
| 3316 | if (len) { | 
|---|
| 3317 | /* Process any frag lists */ | 
|---|
| 3318 |  | 
|---|
| 3319 | if (skb == head) { | 
|---|
| 3320 | if (skb_has_frag_list(skb)) { | 
|---|
| 3321 | skb = skb_shinfo(skb)->frag_list; | 
|---|
| 3322 | goto do_frag_list; | 
|---|
| 3323 | } | 
|---|
| 3324 | } else if (skb->next) { | 
|---|
| 3325 | skb = skb->next; | 
|---|
| 3326 | goto do_frag_list; | 
|---|
| 3327 | } | 
|---|
| 3328 | } | 
|---|
| 3329 |  | 
|---|
| 3330 | out: | 
|---|
| 3331 | return orig_len - len; | 
|---|
| 3332 |  | 
|---|
| 3333 | error: | 
|---|
| 3334 | return orig_len == len ? ret : orig_len - len; | 
|---|
| 3335 | } | 
|---|
| 3336 |  | 
|---|
| 3337 | /* Send skb data on a socket. Socket must be locked. */ | 
|---|
| 3338 | int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, | 
|---|
| 3339 | int len) | 
|---|
| 3340 | { | 
|---|
| 3341 | return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, 0); |
|---|
| 3342 | } | 
|---|
| 3343 | EXPORT_SYMBOL_GPL(skb_send_sock_locked); | 
|---|
| 3344 |  | 
|---|
| 3345 | int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb, | 
|---|
| 3346 | int offset, int len, int flags) | 
|---|
| 3347 | { | 
|---|
| 3348 | return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, flags); |
|---|
| 3349 | } | 
|---|
| 3350 | EXPORT_SYMBOL_GPL(skb_send_sock_locked_with_flags); | 
|---|
| 3351 |  | 
|---|
| 3352 | /* Send skb data on a socket. Socket must be unlocked. */ | 
|---|
| 3353 | int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) | 
|---|
| 3354 | { | 
|---|
| 3355 | return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 0); |
|---|
| 3356 | } | 
|---|
| 3357 |  | 
|---|
| 3358 | /** | 
|---|
| 3359 | *	skb_store_bits - store bits from kernel buffer to skb | 
|---|
| 3360 | *	@skb: destination buffer | 
|---|
| 3361 | *	@offset: offset in destination | 
|---|
| 3362 | *	@from: source buffer | 
|---|
| 3363 | *	@len: number of bytes to copy | 
|---|
| 3364 | * | 
|---|
| 3365 | *	Copy the specified number of bytes from the source buffer to the | 
|---|
| 3366 | *	destination skb.  This function handles all the messy bits of | 
|---|
| 3367 | *	traversing fragment lists and such. | 
|---|
| 3368 | */ | 
|---|
| 3369 |  | 
|---|
| 3370 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) | 
|---|
| 3371 | { | 
|---|
| 3372 | int start = skb_headlen(skb); | 
|---|
| 3373 | struct sk_buff *frag_iter; | 
|---|
| 3374 | int i, copy; | 
|---|
| 3375 |  | 
|---|
| 3376 | if (offset > (int)skb->len - len) | 
|---|
| 3377 | goto fault; | 
|---|
| 3378 |  | 
|---|
| 3379 | if ((copy = start - offset) > 0) { | 
|---|
| 3380 | if (copy > len) | 
|---|
| 3381 | copy = len; | 
|---|
| 3382 | skb_copy_to_linear_data_offset(skb, offset, from, copy); |
|---|
| 3383 | if ((len -= copy) == 0) | 
|---|
| 3384 | return 0; | 
|---|
| 3385 | offset += copy; | 
|---|
| 3386 | from += copy; | 
|---|
| 3387 | } | 
|---|
| 3388 |  | 
|---|
| 3389 | if (!skb_frags_readable(skb)) | 
|---|
| 3390 | goto fault; | 
|---|
| 3391 |  | 
|---|
| 3392 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
|---|
| 3393 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 
|---|
| 3394 | int end; | 
|---|
| 3395 |  | 
|---|
| 3396 | WARN_ON(start > offset + len); | 
|---|
| 3397 |  | 
|---|
| 3398 | end = start + skb_frag_size(frag); | 
|---|
| 3399 | if ((copy = end - offset) > 0) { | 
|---|
| 3400 | u32 p_off, p_len, copied; | 
|---|
| 3401 | struct page *p; | 
|---|
| 3402 | u8 *vaddr; | 
|---|
| 3403 |  | 
|---|
| 3404 | if (copy > len) | 
|---|
| 3405 | copy = len; | 
|---|
| 3406 |  | 
|---|
| 3407 | skb_frag_foreach_page(frag, | 
|---|
| 3408 | skb_frag_off(frag) + offset - start, | 
|---|
| 3409 | copy, p, p_off, p_len, copied) { | 
|---|
| 3410 | vaddr = kmap_atomic(p); |
|---|
| 3411 | memcpy(vaddr + p_off, from + copied, p_len); |
|---|
| 3412 | kunmap_atomic(vaddr); | 
|---|
| 3413 | } | 
|---|
| 3414 |  | 
|---|
| 3415 | if ((len -= copy) == 0) | 
|---|
| 3416 | return 0; | 
|---|
| 3417 | offset += copy; | 
|---|
| 3418 | from += copy; | 
|---|
| 3419 | } | 
|---|
| 3420 | start = end; | 
|---|
| 3421 | } | 
|---|
| 3422 |  | 
|---|
| 3423 | skb_walk_frags(skb, frag_iter) { | 
|---|
| 3424 | int end; | 
|---|
| 3425 |  | 
|---|
| 3426 | WARN_ON(start > offset + len); | 
|---|
| 3427 |  | 
|---|
| 3428 | end = start + frag_iter->len; | 
|---|
| 3429 | if ((copy = end - offset) > 0) { | 
|---|
| 3430 | if (copy > len) | 
|---|
| 3431 | copy = len; | 
|---|
| 3432 | if (skb_store_bits(frag_iter, offset - start, |
|---|
| 3433 | from, copy)) |
|---|
| 3434 | goto fault; | 
|---|
| 3435 | if ((len -= copy) == 0) | 
|---|
| 3436 | return 0; | 
|---|
| 3437 | offset += copy; | 
|---|
| 3438 | from += copy; | 
|---|
| 3439 | } | 
|---|
| 3440 | start = end; | 
|---|
| 3441 | } | 
|---|
| 3442 | if (!len) | 
|---|
| 3443 | return 0; | 
|---|
| 3444 |  | 
|---|
| 3445 | fault: | 
|---|
| 3446 | return -EFAULT; | 
|---|
| 3447 | } | 
|---|
| 3448 | EXPORT_SYMBOL(skb_store_bits); | 
|---|
| 3449 |  | 
|---|
| 3450 | /* Checksum skb data. */ | 
|---|
| 3451 | __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum) | 
|---|
| 3452 | { | 
|---|
| 3453 | int start = skb_headlen(skb); | 
|---|
| 3454 | int i, copy = start - offset; | 
|---|
| 3455 | struct sk_buff *frag_iter; | 
|---|
| 3456 | int pos = 0; | 
|---|
| 3457 |  | 
|---|
| 3458 | /* Checksum header. */ | 
|---|
| 3459 | if (copy > 0) { | 
|---|
| 3460 | if (copy > len) | 
|---|
| 3461 | copy = len; | 
|---|
| 3462 | csum = csum_partial(skb->data + offset, copy, csum); |
|---|
| 3463 | if ((len -= copy) == 0) | 
|---|
| 3464 | return csum; | 
|---|
| 3465 | offset += copy; | 
|---|
| 3466 | pos	= copy; | 
|---|
| 3467 | } | 
|---|
| 3468 |  | 
|---|
| 3469 | if (WARN_ON_ONCE(!skb_frags_readable(skb))) | 
|---|
| 3470 | return 0; | 
|---|
| 3471 |  | 
|---|
| 3472 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
|---|
| 3473 | int end; | 
|---|
| 3474 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 
|---|
| 3475 |  | 
|---|
| 3476 | WARN_ON(start > offset + len); | 
|---|
| 3477 |  | 
|---|
| 3478 | end = start + skb_frag_size(frag); | 
|---|
| 3479 | if ((copy = end - offset) > 0) { | 
|---|
| 3480 | u32 p_off, p_len, copied; | 
|---|
| 3481 | struct page *p; | 
|---|
| 3482 | __wsum csum2; | 
|---|
| 3483 | u8 *vaddr; | 
|---|
| 3484 |  | 
|---|
| 3485 | if (copy > len) | 
|---|
| 3486 | copy = len; | 
|---|
| 3487 |  | 
|---|
| 3488 | skb_frag_foreach_page(frag, | 
|---|
| 3489 | skb_frag_off(frag) + offset - start, | 
|---|
| 3490 | copy, p, p_off, p_len, copied) { | 
|---|
| 3491 | vaddr = kmap_atomic(p); |
|---|
| 3492 | csum2 = csum_partial(vaddr + p_off, p_len, 0); |
|---|
| 3493 | kunmap_atomic(vaddr); |
|---|
| 3494 | csum = csum_block_add(csum, csum2, pos); |
|---|
| 3495 | pos += p_len; | 
|---|
| 3496 | } | 
|---|
| 3497 |  | 
|---|
| 3498 | if (!(len -= copy)) | 
|---|
| 3499 | return csum; | 
|---|
| 3500 | offset += copy; | 
|---|
| 3501 | } | 
|---|
| 3502 | start = end; | 
|---|
| 3503 | } | 
|---|
| 3504 |  | 
|---|
| 3505 | skb_walk_frags(skb, frag_iter) { | 
|---|
| 3506 | int end; | 
|---|
| 3507 |  | 
|---|
| 3508 | WARN_ON(start > offset + len); | 
|---|
| 3509 |  | 
|---|
| 3510 | end = start + frag_iter->len; | 
|---|
| 3511 | if ((copy = end - offset) > 0) { | 
|---|
| 3512 | __wsum csum2; | 
|---|
| 3513 | if (copy > len) | 
|---|
| 3514 | copy = len; | 
|---|
| 3515 | csum2 = skb_checksum(frag_iter, offset - start, copy, |
|---|
| 3516 | 0); |
|---|
| 3517 | csum = csum_block_add(csum, csum2, pos); |
|---|
| 3518 | if ((len -= copy) == 0) | 
|---|
| 3519 | return csum; | 
|---|
| 3520 | offset += copy; | 
|---|
| 3521 | pos    += copy; | 
|---|
| 3522 | } | 
|---|
| 3523 | start = end; | 
|---|
| 3524 | } | 
|---|
| 3525 | BUG_ON(len); | 
|---|
| 3526 |  | 
|---|
| 3527 | return csum; | 
|---|
| 3528 | } | 
|---|
| 3529 | EXPORT_SYMBOL(skb_checksum); | 
|---|
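| | /* A minimal sketch: software-checksum a whole packet, then fold the |
|---|
| | * 32-bit accumulator down to the 16-bit ones' complement form carried |
|---|
| | * in protocol headers: |
|---|
| | * |
|---|
| | *	__wsum csum = skb_checksum(skb, 0, skb->len, 0); |
|---|
| | *	__sum16 folded = csum_fold(csum); |
|---|
| | */ |
|---|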
| 3530 |  | 
|---|
| 3531 | /* Both of above in one bottle. */ | 
|---|
| 3532 |  | 
|---|
| 3533 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | 
|---|
| 3534 | u8 *to, int len) | 
|---|
| 3535 | { | 
|---|
| 3536 | int start = skb_headlen(skb); | 
|---|
| 3537 | int i, copy = start - offset; | 
|---|
| 3538 | struct sk_buff *frag_iter; | 
|---|
| 3539 | int pos = 0; | 
|---|
| 3540 | __wsum csum = 0; | 
|---|
| 3541 |  | 
|---|
| 3542 | /* Copy header. */ | 
|---|
| 3543 | if (copy > 0) { | 
|---|
| 3544 | if (copy > len) | 
|---|
| 3545 | copy = len; | 
|---|
| 3546 | csum = csum_partial_copy_nocheck(skb->data + offset, to, |
|---|
| 3547 | copy); |
|---|
| 3548 | if ((len -= copy) == 0) | 
|---|
| 3549 | return csum; | 
|---|
| 3550 | offset += copy; | 
|---|
| 3551 | to     += copy; | 
|---|
| 3552 | pos	= copy; | 
|---|
| 3553 | } | 
|---|
| 3554 |  | 
|---|
| 3555 | if (!skb_frags_readable(skb)) | 
|---|
| 3556 | return 0; | 
|---|
| 3557 |  | 
|---|
| 3558 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
|---|
| 3559 | int end; | 
|---|
| 3560 |  | 
|---|
| 3561 | WARN_ON(start > offset + len); | 
|---|
| 3562 |  | 
|---|
| 3563 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
|---|
| 3564 | if ((copy = end - offset) > 0) { | 
|---|
| 3565 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 
|---|
| 3566 | u32 p_off, p_len, copied; | 
|---|
| 3567 | struct page *p; | 
|---|
| 3568 | __wsum csum2; | 
|---|
| 3569 | u8 *vaddr; | 
|---|
| 3570 |  | 
|---|
| 3571 | if (copy > len) | 
|---|
| 3572 | copy = len; | 
|---|
| 3573 |  | 
|---|
| 3574 | skb_frag_foreach_page(frag, | 
|---|
| 3575 | skb_frag_off(frag) + offset - start, | 
|---|
| 3576 | copy, p, p_off, p_len, copied) { | 
|---|
| 3577 | vaddr = kmap_atomic(p); |
|---|
| 3578 | csum2 = csum_partial_copy_nocheck(vaddr + p_off, |
|---|
| 3579 | to + copied, |
|---|
| 3580 | p_len); |
|---|
| 3581 | kunmap_atomic(vaddr); |
|---|
| 3582 | csum = csum_block_add(csum, csum2, pos); |
|---|
| 3583 | pos += p_len; | 
|---|
| 3584 | } | 
|---|
| 3585 |  | 
|---|
| 3586 | if (!(len -= copy)) | 
|---|
| 3587 | return csum; | 
|---|
| 3588 | offset += copy; | 
|---|
| 3589 | to     += copy; | 
|---|
| 3590 | } | 
|---|
| 3591 | start = end; | 
|---|
| 3592 | } | 
|---|
| 3593 |  | 
|---|
| 3594 | skb_walk_frags(skb, frag_iter) { | 
|---|
| 3595 | __wsum csum2; | 
|---|
| 3596 | int end; | 
|---|
| 3597 |  | 
|---|
| 3598 | WARN_ON(start > offset + len); | 
|---|
| 3599 |  | 
|---|
| 3600 | end = start + frag_iter->len; | 
|---|
| 3601 | if ((copy = end - offset) > 0) { | 
|---|
| 3602 | if (copy > len) | 
|---|
| 3603 | copy = len; | 
|---|
| 3604 | csum2 = skb_copy_and_csum_bits(frag_iter, |
|---|
| 3605 | offset - start, |
|---|
| 3606 | to, copy); |
|---|
| 3607 | csum = csum_block_add(csum, csum2, pos); |
|---|
| 3608 | if ((len -= copy) == 0) | 
|---|
| 3609 | return csum; | 
|---|
| 3610 | offset += copy; | 
|---|
| 3611 | to     += copy; | 
|---|
| 3612 | pos    += copy; | 
|---|
| 3613 | } | 
|---|
| 3614 | start = end; | 
|---|
| 3615 | } | 
|---|
| 3616 | BUG_ON(len); | 
|---|
| 3617 | return csum; | 
|---|
| 3618 | } | 
|---|
| 3619 | EXPORT_SYMBOL(skb_copy_and_csum_bits); | 
|---|
| 3620 |  | 
|---|
| 3621 | #ifdef CONFIG_NET_CRC32C | 
|---|
| 3622 | u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc) | 
|---|
| 3623 | { | 
|---|
| 3624 | int start = skb_headlen(skb); | 
|---|
| 3625 | int i, copy = start - offset; | 
|---|
| 3626 | struct sk_buff *frag_iter; | 
|---|
| 3627 |  | 
|---|
| 3628 | if (copy > 0) { | 
|---|
| 3629 | copy = min(copy, len); | 
|---|
| 3630 | crc = crc32c(crc, skb->data + offset, copy); | 
|---|
| 3631 | len -= copy; | 
|---|
| 3632 | if (len == 0) | 
|---|
| 3633 | return crc; | 
|---|
| 3634 | offset += copy; | 
|---|
| 3635 | } | 
|---|
| 3636 |  | 
|---|
| 3637 | if (WARN_ON_ONCE(!skb_frags_readable(skb))) | 
|---|
| 3638 | return 0; | 
|---|
| 3639 |  | 
|---|
| 3640 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
|---|
| 3641 | int end; | 
|---|
| 3642 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 
|---|
| 3643 |  | 
|---|
| 3644 | WARN_ON(start > offset + len); | 
|---|
| 3645 |  | 
|---|
| 3646 | end = start + skb_frag_size(frag); | 
|---|
| 3647 | copy = end - offset; | 
|---|
| 3648 | if (copy > 0) { | 
|---|
| 3649 | u32 p_off, p_len, copied; | 
|---|
| 3650 | struct page *p; | 
|---|
| 3651 | u8 *vaddr; | 
|---|
| 3652 |  | 
|---|
| 3653 | copy = min(copy, len); | 
|---|
| 3654 | skb_frag_foreach_page(frag, | 
|---|
| 3655 | skb_frag_off(frag) + offset - start, | 
|---|
| 3656 | copy, p, p_off, p_len, copied) { | 
|---|
| 3657 | vaddr = kmap_atomic(p); | 
|---|
| 3658 | crc = crc32c(crc, vaddr + p_off, p_len); | 
|---|
| 3659 | kunmap_atomic(vaddr); | 
|---|
| 3660 | } | 
|---|
| 3661 | len -= copy; | 
|---|
| 3662 | if (len == 0) | 
|---|
| 3663 | return crc; | 
|---|
| 3664 | offset += copy; | 
|---|
| 3665 | } | 
|---|
| 3666 | start = end; | 
|---|
| 3667 | } | 
|---|
| 3668 |  | 
|---|
| 3669 | skb_walk_frags(skb, frag_iter) { | 
|---|
| 3670 | int end; | 
|---|
| 3671 |  | 
|---|
| 3672 | WARN_ON(start > offset + len); | 
|---|
| 3673 |  | 
|---|
| 3674 | end = start + frag_iter->len; | 
|---|
| 3675 | copy = end - offset; | 
|---|
| 3676 | if (copy > 0) { | 
|---|
| 3677 | copy = min(copy, len); | 
|---|
| 3678 | crc = skb_crc32c(frag_iter, offset - start, copy, crc); | 
|---|
| 3679 | len -= copy; | 
|---|
| 3680 | if (len == 0) | 
|---|
| 3681 | return crc; | 
|---|
| 3682 | offset += copy; | 
|---|
| 3683 | } | 
|---|
| 3684 | start = end; | 
|---|
| 3685 | } | 
|---|
| 3686 | BUG_ON(len); | 
|---|
| 3687 |  | 
|---|
| 3688 | return crc; | 
|---|
| 3689 | } | 
|---|
| 3690 | EXPORT_SYMBOL(skb_crc32c); | 
|---|
| 3691 | #endif /* CONFIG_NET_CRC32C */ | 
|---|
| 3692 |  | 
|---|
| 3693 | __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) | 
|---|
| 3694 | { | 
|---|
| 3695 | __sum16 sum; | 
|---|
| 3696 |  | 
|---|
| 3697 | sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); |
|---|
| 3698 | /* See comments in __skb_checksum_complete(). */ | 
|---|
| 3699 | if (likely(!sum)) { | 
|---|
| 3700 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && | 
|---|
| 3701 | !skb->csum_complete_sw) | 
|---|
| 3702 | netdev_rx_csum_fault(skb->dev, skb); |
|---|
| 3703 | } | 
|---|
| 3704 | if (!skb_shared(skb)) | 
|---|
| 3705 | skb->csum_valid = !sum; | 
|---|
| 3706 | return sum; | 
|---|
| 3707 | } | 
|---|
| 3708 | EXPORT_SYMBOL(__skb_checksum_complete_head); | 
|---|
| 3709 |  | 
|---|
| 3710 | /* This function assumes skb->csum already holds pseudo header's checksum, | 
|---|
| 3711 | * which has been changed from the hardware checksum, for example, by | 
|---|
| 3712 | * __skb_checksum_validate_complete(). And, the original skb->csum must | 
|---|
| 3713 | * have been validated unsuccessfully for CHECKSUM_COMPLETE case. | 
|---|
| 3714 | * | 
|---|
| 3715 | * It returns non-zero if the recomputed checksum is still invalid, otherwise | 
|---|
| 3716 | * zero. The new checksum is stored back into skb->csum unless the skb is | 
|---|
| 3717 | * shared. | 
|---|
| 3718 | */ | 
|---|
| 3719 | __sum16 __skb_checksum_complete(struct sk_buff *skb) | 
|---|
| 3720 | { | 
|---|
| 3721 | __wsum csum; | 
|---|
| 3722 | __sum16 sum; | 
|---|
| 3723 |  | 
|---|
| 3724 | csum = skb_checksum(skb, 0, skb->len, 0); | 
|---|
| 3725 |  | 
|---|
| 3726 | sum = csum_fold(csum_add(skb->csum, csum)); |
|---|
| 3727 | /* This check is inverted, because we already knew the hardware | 
|---|
| 3728 | * checksum is invalid before calling this function. So, if the | 
|---|
| 3729 | * re-computed checksum is valid instead, then we have a mismatch | 
|---|
| 3730 | * between the original skb->csum and skb_checksum(). This means either | 
|---|
| 3731 | * the original hardware checksum is incorrect or we screw up skb->csum | 
|---|
| 3732 | * when moving skb->data around. | 
|---|
| 3733 | */ | 
|---|
| 3734 | if (likely(!sum)) { | 
|---|
| 3735 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && | 
|---|
| 3736 | !skb->csum_complete_sw) | 
|---|
| 3737 | netdev_rx_csum_fault(skb->dev, skb); |
|---|
| 3738 | } | 
|---|
| 3739 |  | 
|---|
| 3740 | if (!skb_shared(skb)) { | 
|---|
| 3741 | /* Save full packet checksum */ | 
|---|
| 3742 | skb->csum = csum; | 
|---|
| 3743 | skb->ip_summed = CHECKSUM_COMPLETE; | 
|---|
| 3744 | skb->csum_complete_sw = 1; | 
|---|
| 3745 | skb->csum_valid = !sum; | 
|---|
| 3746 | } | 
|---|
| 3747 |  | 
|---|
| 3748 | return sum; | 
|---|
| 3749 | } | 
|---|
| 3750 | EXPORT_SYMBOL(__skb_checksum_complete); | 
|---|
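| | /* Most callers should not use this directly; the usual entry point is |
|---|
| | * the skb_checksum_complete() wrapper, as in this sketch of a typical |
|---|
| | * receive path: |
|---|
| | * |
|---|
| | *	if (skb_checksum_complete(skb)) |
|---|
| | *		goto csum_error; |
|---|
| | */ |
|---|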
| 3751 |  | 
|---|
| 3752 | /** | 
|---|
| 3753 | *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() | 
|---|
| 3754 | *	@from: source buffer | 
|---|
| 3755 | * | 
|---|
| 3756 | *	Calculates the amount of linear headroom needed in the 'to' skb passed | 
|---|
| 3757 | *	into skb_zerocopy(). | 
|---|
| 3758 | */ | 
|---|
| 3759 | unsigned int | 
|---|
| 3760 | skb_zerocopy_headlen(const struct sk_buff *from) | 
|---|
| 3761 | { | 
|---|
| 3762 | unsigned int hlen = 0; | 
|---|
| 3763 |  | 
|---|
| 3764 | if (!from->head_frag || | 
|---|
| 3765 | skb_headlen(from) < L1_CACHE_BYTES || |
|---|
| 3766 | skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { |
|---|
| 3767 | hlen = skb_headlen(from); |
|---|
| 3768 | if (!hlen) | 
|---|
| 3769 | hlen = from->len; | 
|---|
| 3770 | } | 
|---|
| 3771 |  | 
|---|
| 3772 | if (skb_has_frag_list(from)) |
|---|
| 3773 | hlen = from->len; | 
|---|
| 3774 |  | 
|---|
| 3775 | return hlen; | 
|---|
| 3776 | } | 
|---|
| 3777 | EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); | 
|---|
| 3778 |  | 
|---|
| 3779 | /** | 
|---|
| 3780 | *	skb_zerocopy - Zero copy skb to skb | 
|---|
| 3781 | *	@to: destination buffer | 
|---|
| 3782 | *	@from: source buffer | 
|---|
| 3783 | *	@len: number of bytes to copy from source buffer | 
|---|
| 3784 | *	@hlen: size of linear headroom in destination buffer | 
|---|
| 3785 | * | 
|---|
| 3786 | *	Copies up to `len` bytes from `from` to `to` by creating references | 
|---|
| 3787 | *	to the frags in the source buffer. | 
|---|
| 3788 | * | 
|---|
| 3789 | *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the | 
|---|
| 3790 | *	headroom in the `to` buffer. | 
|---|
| 3791 | * | 
|---|
| 3792 | *	Return value: | 
|---|
| 3793 | *	0: everything is OK | 
|---|
| 3794 | *	-ENOMEM: couldn't orphan frags of @from due to lack of memory | 
|---|
| 3795 | *	-EFAULT: skb_copy_bits() found some problem with skb geometry | 
|---|
| 3796 | */ | 
|---|
| 3797 | int | 
|---|
| 3798 | skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) | 
|---|
| 3799 | { | 
|---|
| 3800 | int i, j = 0; | 
|---|
| 3801 | int plen = 0; /* length of skb->head fragment */ | 
|---|
| 3802 | int ret; | 
|---|
| 3803 | struct page *page; | 
|---|
| 3804 | unsigned int offset; | 
|---|
| 3805 |  | 
|---|
| 3806 | BUG_ON(!from->head_frag && !hlen); | 
|---|
| 3807 |  | 
|---|
| 3808 | /* don't bother with small payloads */ |
|---|
| 3809 | if (len <= skb_tailroom(to)) |
|---|
| 3810 | return skb_copy_bits(from, 0, skb_put(to, len), len); | 
|---|
| 3811 |  | 
|---|
| 3812 | if (hlen) { | 
|---|
| 3813 | ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); | 
|---|
| 3814 | if (unlikely(ret)) | 
|---|
| 3815 | return ret; | 
|---|
| 3816 | len -= hlen; | 
|---|
| 3817 | } else { | 
|---|
| 3818 | plen = min_t(int, skb_headlen(from), len); | 
|---|
| 3819 | if (plen) { | 
|---|
| 3820 | page = virt_to_head_page(from->head); |
|---|
| 3821 | offset = from->data - (unsigned char *)page_address(page); | 
|---|
| 3822 | __skb_fill_netmem_desc(to, 0, page_to_netmem(page), |
|---|
| 3823 | offset, plen); |
|---|
| 3824 | get_page(page); | 
|---|
| 3825 | j = 1; | 
|---|
| 3826 | len -= plen; | 
|---|
| 3827 | } | 
|---|
| 3828 | } | 
|---|
| 3829 |  | 
|---|
| 3830 | skb_len_add(to, len + plen); |
|---|
| 3831 |  | 
|---|
| 3832 | if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { | 
|---|
| 3833 | skb_tx_error(from); | 
|---|
| 3834 | return -ENOMEM; | 
|---|
| 3835 | } | 
|---|
| 3836 | skb_zerocopy_clone(to, from, GFP_ATOMIC); |
|---|
| 3837 |  | 
|---|
| 3838 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { | 
|---|
| 3839 | int size; | 
|---|
| 3840 |  | 
|---|
| 3841 | if (!len) | 
|---|
| 3842 | break; | 
|---|
| 3843 | skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; | 
|---|
| 3844 | size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), | 
|---|
| 3845 | len); | 
|---|
| 3846 | skb_frag_size_set(&skb_shinfo(to)->frags[j], size); |
|---|
| 3847 | len -= size; | 
|---|
| 3848 | skb_frag_ref(to, j); |
|---|
| 3849 | j++; | 
|---|
| 3850 | } | 
|---|
| 3851 | skb_shinfo(to)->nr_frags = j; | 
|---|
| 3852 |  | 
|---|
| 3853 | return 0; | 
|---|
| 3854 | } | 
|---|
| 3855 | EXPORT_SYMBOL_GPL(skb_zerocopy); | 
|---|
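| | /* A sketch of the intended pairing with skb_zerocopy_headlen() (loosely |
|---|
| | * modelled on the nfnetlink_queue usage; extra is an illustrative |
|---|
| | * allowance for whatever else the caller puts in the new skb): |
|---|
| | * |
|---|
| | *	hlen = skb_zerocopy_headlen(from); |
|---|
| | *	to = alloc_skb(hlen + extra, GFP_ATOMIC); |
|---|
| | *	if (to && skb_zerocopy(to, from, from->len, hlen) < 0) |
|---|
| | *		goto err; |
|---|
| | */ |
|---|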
| 3856 |  | 
|---|
| 3857 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) | 
|---|
| 3858 | { | 
|---|
| 3859 | __wsum csum; | 
|---|
| 3860 | long csstart; | 
|---|
| 3861 |  | 
|---|
| 3862 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 
|---|
| 3863 | csstart = skb_checksum_start_offset(skb); | 
|---|
| 3864 | else | 
|---|
| 3865 | csstart = skb_headlen(skb); | 
|---|
| 3866 |  | 
|---|
| 3867 | BUG_ON(csstart > skb_headlen(skb)); | 
|---|
| 3868 |  | 
|---|
| 3869 | skb_copy_from_linear_data(skb, to, csstart); |
|---|
| 3870 |  | 
|---|
| 3871 | csum = 0; | 
|---|
| 3872 | if (csstart != skb->len) | 
|---|
| 3873 | csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, | 
|---|
| 3874 | skb->len - csstart); | 
|---|
| 3875 |  | 
|---|
| 3876 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 
|---|
| 3877 | long csstuff = csstart + skb->csum_offset; | 
|---|
| 3878 |  | 
|---|
| 3879 | *((__sum16 *)(to + csstuff)) = csum_fold(csum); |
|---|
| 3880 | } | 
|---|
| 3881 | } | 
|---|
| 3882 | EXPORT_SYMBOL(skb_copy_and_csum_dev); | 
|---|
| 3883 |  | 
|---|
| 3884 | /** | 
|---|
| 3885 | *	skb_dequeue - remove from the head of the queue | 
|---|
| 3886 | *	@list: list to dequeue from | 
|---|
| 3887 | * | 
|---|
| 3888 | *	Remove the head of the list. The list lock is taken so the function | 
|---|
| 3889 | *	may be used safely with other locking list functions. The head item is | 
|---|
| 3890 | *	returned or %NULL if the list is empty. | 
|---|
| 3891 | */ | 
|---|
| 3892 |  | 
|---|
| 3893 | struct sk_buff *skb_dequeue(struct sk_buff_head *list) | 
|---|
| 3894 | { | 
|---|
| 3895 | unsigned long flags; | 
|---|
| 3896 | struct sk_buff *result; | 
|---|
| 3897 |  | 
|---|
| 3898 | spin_lock_irqsave(&list->lock, flags); | 
|---|
| 3899 | result = __skb_dequeue(list); | 
|---|
| 3900 | spin_unlock_irqrestore(&list->lock, flags); |
|---|
| 3901 | return result; | 
|---|
| 3902 | } | 
|---|
| 3903 | EXPORT_SYMBOL(skb_dequeue); | 
|---|
| 3904 |  | 
|---|
| 3905 | /** | 
|---|
| 3906 | *	skb_dequeue_tail - remove from the tail of the queue | 
|---|
| 3907 | *	@list: list to dequeue from | 
|---|
| 3908 | * | 
|---|
| 3909 | *	Remove the tail of the list. The list lock is taken so the function | 
|---|
| 3910 | *	may be used safely with other locking list functions. The tail item is | 
|---|
| 3911 | *	returned or %NULL if the list is empty. | 
|---|
| 3912 | */ | 
|---|
| 3913 | struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) | 
|---|
| 3914 | { | 
|---|
| 3915 | unsigned long flags; | 
|---|
| 3916 | struct sk_buff *result; | 
|---|
| 3917 |  | 
|---|
| 3918 | spin_lock_irqsave(&list->lock, flags); | 
|---|
| 3919 | result = __skb_dequeue_tail(list); | 
|---|
| 3920 | spin_unlock_irqrestore(lock: &list->lock, flags); | 
|---|
| 3921 | return result; | 
|---|
| 3922 | } | 
|---|
| 3923 | EXPORT_SYMBOL(skb_dequeue_tail); | 
|---|
| 3924 |  | 
|---|
| 3925 | /** | 
|---|
| 3926 | *	skb_queue_purge_reason - empty a list | 
|---|
| 3927 | *	@list: list to empty | 
|---|
| 3928 | *	@reason: drop reason | 
|---|
| 3929 | * | 
|---|
| 3930 | *	Delete all buffers on an &sk_buff list. Each buffer is removed from | 
|---|
| 3931 | *	the list and one reference dropped. This function takes the list | 
|---|
| 3932 | *	lock and is atomic with respect to other list locking functions. | 
|---|
| 3933 | */ | 
|---|
| 3934 | void skb_queue_purge_reason(struct sk_buff_head *list, | 
|---|
| 3935 | enum skb_drop_reason reason) | 
|---|
| 3936 | { | 
|---|
| 3937 | struct sk_buff_head tmp; | 
|---|
| 3938 | unsigned long flags; | 
|---|
| 3939 |  | 
|---|
| 3940 | if (skb_queue_empty_lockless(list)) | 
|---|
| 3941 | return; | 
|---|
| 3942 |  | 
|---|
| 3943 | __skb_queue_head_init(&tmp); |
|---|
| 3944 |  |
|---|
| 3945 | spin_lock_irqsave(&list->lock, flags); |
|---|
| 3946 | skb_queue_splice_init(list, &tmp); |
|---|
| 3947 | spin_unlock_irqrestore(&list->lock, flags); |
|---|
| 3948 |  |
|---|
| 3949 | __skb_queue_purge_reason(&tmp, reason); |
|---|
| 3950 | } | 
|---|
| 3951 | EXPORT_SYMBOL(skb_queue_purge_reason); | 
|---|
| 3952 |  | 
|---|
| 3953 | /** | 
|---|
| 3954 | *	skb_rbtree_purge - empty a skb rbtree | 
|---|
| 3955 | *	@root: root of the rbtree to empty | 
|---|
| 3956 | *	Return value: the sum of truesizes of all purged skbs. | 
|---|
| 3957 | * | 
|---|
| 3958 | *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from | 
|---|
| 3959 | *	the list and one reference dropped. This function does not take | 
|---|
| 3960 | *	any lock. Synchronization should be handled by the caller (e.g., TCP | 
|---|
| 3961 | *	out-of-order queue is protected by the socket lock). | 
|---|
| 3962 | */ | 
|---|
| 3963 | unsigned int skb_rbtree_purge(struct rb_root *root) | 
|---|
| 3964 | { | 
|---|
| 3965 | struct rb_node *p = rb_first(root); | 
|---|
| 3966 | unsigned int sum = 0; | 
|---|
| 3967 |  | 
|---|
| 3968 | while (p) { | 
|---|
| 3969 | struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); | 
|---|
| 3970 |  | 
|---|
| 3971 | p = rb_next(p); | 
|---|
| 3972 | rb_erase(&skb->rbnode, root); | 
|---|
| 3973 | sum += skb->truesize; | 
|---|
| 3974 | kfree_skb(skb); | 
|---|
| 3975 | } | 
|---|
| 3976 | return sum; | 
|---|
| 3977 | } | 
|---|
| 3978 |  | 
|---|
| 3979 | void skb_errqueue_purge(struct sk_buff_head *list) | 
|---|
| 3980 | { | 
|---|
| 3981 | struct sk_buff *skb, *next; | 
|---|
| 3982 | struct sk_buff_head kill; | 
|---|
| 3983 | unsigned long flags; | 
|---|
| 3984 |  | 
|---|
| 3985 | __skb_queue_head_init(&kill); |
|---|
| 3986 |  | 
|---|
| 3987 | spin_lock_irqsave(&list->lock, flags); | 
|---|
| 3988 | skb_queue_walk_safe(list, skb, next) { | 
|---|
| 3989 | if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY || | 
|---|
| 3990 | SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) | 
|---|
| 3991 | continue; | 
|---|
| 3992 | __skb_unlink(skb, list); | 
|---|
| 3993 | __skb_queue_tail(&kill, skb); |
|---|
| 3994 | } |
|---|
| 3995 | spin_unlock_irqrestore(&list->lock, flags); |
|---|
| 3996 | __skb_queue_purge(&kill); |
|---|
| 3997 | } | 
|---|
| 3998 | EXPORT_SYMBOL(skb_errqueue_purge); | 
|---|
| 3999 |  | 
|---|
| 4000 | /** | 
|---|
| 4001 | *	skb_queue_head - queue a buffer at the list head | 
|---|
| 4002 | *	@list: list to use | 
|---|
| 4003 | *	@newsk: buffer to queue | 
|---|
| 4004 | * | 
|---|
| 4005 | *	Queue a buffer at the start of the list. This function takes the | 
|---|
| 4006 | *	list lock and can be used safely with other locking | 
|---|
| 4007 | *	&sk_buff functions. | 
|---|
| 4008 | * | 
|---|
| 4009 | *	A buffer cannot be placed on two lists at the same time. | 
|---|
| 4010 | */ | 
|---|
| 4011 | void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) | 
|---|
| 4012 | { | 
|---|
| 4013 | unsigned long flags; | 
|---|
| 4014 |  | 
|---|
| 4015 | spin_lock_irqsave(&list->lock, flags); | 
|---|
| 4016 | __skb_queue_head(list, newsk); | 
|---|
| 4017 | spin_unlock_irqrestore(&list->lock, flags); | 
|---|
| 4018 | } | 
|---|
| 4019 | EXPORT_SYMBOL(skb_queue_head); | 
|---|
| 4020 |  | 
|---|
| 4021 | /** | 
|---|
| 4022 | *	skb_queue_tail - queue a buffer at the list tail | 
|---|
| 4023 | *	@list: list to use | 
|---|
| 4024 | *	@newsk: buffer to queue | 
|---|
| 4025 | * | 
|---|
| 4026 | *	Queue a buffer at the tail of the list. This function takes the | 
|---|
| 4027 | *	list lock and can be used safely with other locking | 
|---|
| 4028 | *	&sk_buff functions. | 
|---|
| 4029 | * | 
|---|
| 4030 | *	A buffer cannot be placed on two lists at the same time. | 
|---|
| 4031 | */ | 
|---|
| 4032 | void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) | 
|---|
| 4033 | { | 
|---|
| 4034 | unsigned long flags; | 
|---|
| 4035 |  | 
|---|
| 4036 | spin_lock_irqsave(&list->lock, flags); | 
|---|
| 4037 | __skb_queue_tail(list, newsk); | 
|---|
| 4038 | spin_unlock_irqrestore(&list->lock, flags); | 
|---|
| 4039 | } | 
|---|
| 4040 | EXPORT_SYMBOL(skb_queue_tail); | 
|---|
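/*
 * Editor's illustration: the intended producer/consumer pairing. Because
 * skb_queue_tail() takes the list lock with IRQs disabled, the producer may
 * run from hard-interrupt context; both function names are hypothetical.
 */
static void my_produce(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_queue_tail(q, skb);		/* e.g. from an ISR */
}

static struct sk_buff *my_consume(struct sk_buff_head *q)
{
	return skb_dequeue(q);		/* e.g. from process context */
}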
| 4041 |  | 
|---|
| 4042 | /** | 
|---|
| 4043 | *	skb_unlink	-	remove a buffer from a list | 
|---|
| 4044 | *	@skb: buffer to remove | 
|---|
| 4045 | *	@list: list to use | 
|---|
| 4046 | * | 
|---|
| 4047 | *	Remove a packet from a list. The list locks are taken and this | 
|---|
| 4048 | *	function is atomic with respect to other list locked calls. | 
|---|
| 4049 | * | 
|---|
| 4050 | *	You must know what list the SKB is on. | 
|---|
| 4051 | */ | 
|---|
| 4052 | void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) | 
|---|
| 4053 | { | 
|---|
| 4054 | unsigned long flags; | 
|---|
| 4055 |  | 
|---|
| 4056 | spin_lock_irqsave(&list->lock, flags); | 
|---|
| 4057 | __skb_unlink(skb, list); | 
|---|
| 4058 | spin_unlock_irqrestore(&list->lock, flags); | 
|---|
| 4059 | } | 
|---|
| 4060 | EXPORT_SYMBOL(skb_unlink); | 
|---|
| 4061 |  | 
|---|
| 4062 | /** | 
|---|
| 4063 | *	skb_append	-	append a buffer | 
|---|
| 4064 | *	@old: buffer to insert after | 
|---|
| 4065 | *	@newsk: buffer to insert | 
|---|
| 4066 | *	@list: list to use | 
|---|
| 4067 | * | 
|---|
| 4068 | *	Place a packet after a given packet in a list. The list locks are taken | 
|---|
| 4069 | *	and this function is atomic with respect to other list locked calls. | 
|---|
| 4070 | *	A buffer cannot be placed on two lists at the same time. | 
|---|
| 4071 | */ | 
|---|
| 4072 | void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) | 
|---|
| 4073 | { | 
|---|
| 4074 | unsigned long flags; | 
|---|
| 4075 |  | 
|---|
| 4076 | spin_lock_irqsave(&list->lock, flags); | 
|---|
| 4077 | __skb_queue_after(list, old, newsk); | 
|---|
| 4078 | spin_unlock_irqrestore(&list->lock, flags); | 
|---|
| 4079 | } | 
|---|
| 4080 | EXPORT_SYMBOL(skb_append); | 
|---|
| 4081 |  | 
|---|
| 4082 | static inline void skb_split_inside_header(struct sk_buff *skb, | 
|---|
| 4083 | struct sk_buff *skb1, | 
|---|
| 4084 | const u32 len, const int pos) | 
|---|
| 4085 | { | 
|---|
| 4086 | int i; | 
|---|
| 4087 |  | 
|---|
| 4088 | skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), | 
|---|
| 4089 | pos - len); | 
|---|
| 4090 | /* And move data appendix as is. */ | 
|---|
| 4091 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) | 
|---|
| 4092 | skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; | 
|---|
| 4093 |  | 
|---|
| 4094 | skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; | 
|---|
| 4095 | skb1->unreadable	   = skb->unreadable; | 
|---|
| 4096 | skb_shinfo(skb)->nr_frags  = 0; | 
|---|
| 4097 | skb1->data_len		   = skb->data_len; | 
|---|
| 4098 | skb1->len		   += skb1->data_len; | 
|---|
| 4099 | skb->data_len		   = 0; | 
|---|
| 4100 | skb->len		   = len; | 
|---|
| 4101 | skb_set_tail_pointer(skb, len); | 
|---|
| 4102 | } | 
|---|
| 4103 |  | 
|---|
| 4104 | static inline void skb_split_no_header(struct sk_buff *skb, | 
|---|
| 4105 | struct sk_buff *skb1, | 
|---|
| 4106 | const u32 len, int pos) | 
|---|
| 4107 | { | 
|---|
| 4108 | int i, k = 0; | 
|---|
| 4109 | const int nfrags = skb_shinfo(skb)->nr_frags; | 
|---|
| 4110 |  | 
|---|
| 4111 | skb_shinfo(skb)->nr_frags = 0; | 
|---|
| 4112 | skb1->len		  = skb1->data_len = skb->len - len; | 
|---|
| 4113 | skb->len		  = len; | 
|---|
| 4114 | skb->data_len		  = len - pos; | 
|---|
| 4115 |  | 
|---|
| 4116 | for (i = 0; i < nfrags; i++) { | 
|---|
| 4117 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); | 
|---|
| 4118 |  | 
|---|
| 4119 | if (pos + size > len) { | 
|---|
| 4120 | skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; | 
|---|
| 4121 |  | 
|---|
| 4122 | if (pos < len) { | 
|---|
| 4123 | /* Split frag. | 
|---|
| 4124 | * We have two variants in this case: | 
|---|
| 4125 | * 1. Move all the frag to the second | 
|---|
| 4126 | *    part, if it is possible. E.g. | 
|---|
| 4127 | *    this approach is mandatory for TUX, | 
|---|
| 4128 | *    where splitting is expensive. | 
|---|
| 4129 | * 2. Split accurately, which is what we do here. | 
|---|
| 4130 | */ | 
|---|
| 4131 | skb_frag_ref(skb, i); | 
|---|
| 4132 | skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); | 
|---|
| 4133 | skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); | 
|---|
| 4134 | skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); | 
|---|
| 4135 | skb_shinfo(skb)->nr_frags++; | 
|---|
| 4136 | } | 
|---|
| 4137 | k++; | 
|---|
| 4138 | } else | 
|---|
| 4139 | skb_shinfo(skb)->nr_frags++; | 
|---|
| 4140 | pos += size; | 
|---|
| 4141 | } | 
|---|
| 4142 | skb_shinfo(skb1)->nr_frags = k; | 
|---|
| 4143 |  | 
|---|
| 4144 | skb1->unreadable = skb->unreadable; | 
|---|
| 4145 | } | 
|---|
| 4146 |  | 
|---|
| 4147 | /** | 
|---|
| 4148 | * skb_split - Split fragmented skb to two parts at length len. | 
|---|
| 4149 | * @skb: the buffer to split | 
|---|
| 4150 | * @skb1: the buffer to receive the second part | 
|---|
| 4151 | * @len: new length for skb | 
|---|
| 4152 | */ | 
|---|
| 4153 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) | 
|---|
| 4154 | { | 
|---|
| 4155 | int pos = skb_headlen(skb); | 
|---|
| 4156 | const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY; | 
|---|
| 4157 |  | 
|---|
| 4158 | skb_zcopy_downgrade_managed(skb); | 
|---|
| 4159 |  | 
|---|
| 4160 | skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; | 
|---|
| 4161 | skb_zerocopy_clone(skb1, skb, 0); | 
|---|
| 4162 | if (len < pos)	/* Split line is inside header. */ | 
|---|
| 4163 | skb_split_inside_header(skb, skb1, len, pos); | 
|---|
| 4164 | else		/* Second chunk has no header, nothing to copy. */ | 
|---|
| 4165 | skb_split_no_header(skb, skb1, len, pos); | 
|---|
| 4166 | } | 
|---|
| 4167 | EXPORT_SYMBOL(skb_split); | 
|---|
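/*
 * Editor's illustration, in the spirit of TCP's tso_fragment(): keep the
 * first mss bytes in skb and move the remainder to a new buffer. The
 * zero-size allocation assumes the split point is at or beyond the linear
 * part, so the second chunk carries paged data only. A hedged sketch.
 */
static struct sk_buff *my_split_at(struct sk_buff *skb, u32 mss)
{
	struct sk_buff *buff;

	if (skb->len <= mss || mss < skb_headlen(skb))
		return NULL;

	buff = alloc_skb(0, GFP_ATOMIC);
	if (!buff)
		return NULL;

	skb_split(skb, buff, mss);	/* skb->len is now mss */
	return buff;
}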
| 4168 |  | 
|---|
| 4169 | /* Shifting from/to a cloned skb is a no-go. | 
|---|
| 4170 | * | 
|---|
| 4171 | * Caller cannot keep skb_shinfo related pointers past calling here! | 
|---|
| 4172 | */ | 
|---|
| 4173 | static int skb_prepare_for_shift(struct sk_buff *skb) | 
|---|
| 4174 | { | 
|---|
| 4175 | return skb_unclone_keeptruesize(skb, GFP_ATOMIC); | 
|---|
| 4176 | } | 
|---|
| 4177 |  | 
|---|
| 4178 | /** | 
|---|
| 4179 | * skb_shift - Shifts paged data partially from skb to another | 
|---|
| 4180 | * @tgt: buffer into which tail data gets added | 
|---|
| 4181 | * @skb: buffer from which the paged data comes from | 
|---|
| 4182 | * @shiftlen: shift up to this many bytes | 
|---|
| 4183 | * | 
|---|
| 4184 | * Attempts to shift up to shiftlen worth of bytes, which may be less than | 
|---|
| 4185 | * the length of the skb, from skb to tgt. Returns the number of bytes shifted. | 
|---|
| 4186 | * It's up to the caller to free skb if everything was shifted. | 
|---|
| 4187 | * | 
|---|
| 4188 | * If @tgt runs out of frags, the whole operation is aborted. | 
|---|
| 4189 | * | 
|---|
| 4190 | * Skb cannot include anything but paged data, while tgt is allowed | 
|---|
| 4191 | * to have non-paged data as well. | 
|---|
| 4192 | * | 
|---|
| 4193 | * TODO: full sized shift could be optimized but that would need | 
|---|
| 4194 | * specialized skb free'er to handle frags without up-to-date nr_frags. | 
|---|
| 4195 | */ | 
|---|
| 4196 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) | 
|---|
| 4197 | { | 
|---|
| 4198 | int from, to, merge, todo; | 
|---|
| 4199 | skb_frag_t *fragfrom, *fragto; | 
|---|
| 4200 |  | 
|---|
| 4201 | BUG_ON(shiftlen > skb->len); | 
|---|
| 4202 |  | 
|---|
| 4203 | if (skb_headlen(skb)) | 
|---|
| 4204 | return 0; | 
|---|
| 4205 | if (skb_zcopy(tgt) || skb_zcopy(skb)) | 
|---|
| 4206 | return 0; | 
|---|
| 4207 |  | 
|---|
| 4208 | DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); | 
|---|
| 4209 | DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb)); | 
|---|
| 4210 |  | 
|---|
| 4211 | todo = shiftlen; | 
|---|
| 4212 | from = 0; | 
|---|
| 4213 | to = skb_shinfo(tgt)->nr_frags; | 
|---|
| 4214 | fragfrom = &skb_shinfo(skb)->frags[from]; | 
|---|
| 4215 |  | 
|---|
| 4216 | /* Actual merge is delayed until the point when we know we can | 
|---|
| 4217 | * commit all, so that we don't have to undo partial changes | 
|---|
| 4218 | */ | 
|---|
| 4219 | if (!skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), | 
|---|
| 4220 | skb_frag_off(fragfrom))) { | 
|---|
| 4221 | merge = -1; | 
|---|
| 4222 | } else { | 
|---|
| 4223 | merge = to - 1; | 
|---|
| 4224 |  | 
|---|
| 4225 | todo -= skb_frag_size(fragfrom); | 
|---|
| 4226 | if (todo < 0) { | 
|---|
| 4227 | if (skb_prepare_for_shift(skb) || | 
|---|
| 4228 | skb_prepare_for_shift(tgt)) | 
|---|
| 4229 | return 0; | 
|---|
| 4230 |  | 
|---|
| 4231 | /* All previous frag pointers might be stale! */ | 
|---|
| 4232 | fragfrom = &skb_shinfo(skb)->frags[from]; | 
|---|
| 4233 | fragto = &skb_shinfo(tgt)->frags[merge]; | 
|---|
| 4234 |  | 
|---|
| 4235 | skb_frag_size_add(fragto, shiftlen); | 
|---|
| 4236 | skb_frag_size_sub(fragfrom, shiftlen); | 
|---|
| 4237 | skb_frag_off_add(fragfrom, shiftlen); | 
|---|
| 4238 |  | 
|---|
| 4239 | goto onlymerged; | 
|---|
| 4240 | } | 
|---|
| 4241 |  | 
|---|
| 4242 | from++; | 
|---|
| 4243 | } | 
|---|
| 4244 |  | 
|---|
| 4245 | /* Skip full, not-fitting skb to avoid expensive operations */ | 
|---|
| 4246 | if ((shiftlen == skb->len) && | 
|---|
| 4247 | (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) | 
|---|
| 4248 | return 0; | 
|---|
| 4249 |  | 
|---|
| 4250 | if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) | 
|---|
| 4251 | return 0; | 
|---|
| 4252 |  | 
|---|
| 4253 | while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { | 
|---|
| 4254 | if (to == MAX_SKB_FRAGS) | 
|---|
| 4255 | return 0; | 
|---|
| 4256 |  | 
|---|
| 4257 | fragfrom = &skb_shinfo(skb)->frags[from]; | 
|---|
| 4258 | fragto = &skb_shinfo(tgt)->frags[to]; | 
|---|
| 4259 |  | 
|---|
| 4260 | if (todo >= skb_frag_size(fragfrom)) { | 
|---|
| 4261 | *fragto = *fragfrom; | 
|---|
| 4262 | todo -= skb_frag_size(fragfrom); | 
|---|
| 4263 | from++; | 
|---|
| 4264 | to++; | 
|---|
| 4265 |  | 
|---|
| 4266 | } else { | 
|---|
| 4267 | __skb_frag_ref(fragfrom); | 
|---|
| 4268 | skb_frag_page_copy(fragto, fragfrom); | 
|---|
| 4269 | skb_frag_off_copy(fragto, fragfrom); | 
|---|
| 4270 | skb_frag_size_set(fragto, todo); | 
|---|
| 4271 |  | 
|---|
| 4272 | skb_frag_off_add(fragfrom, todo); | 
|---|
| 4273 | skb_frag_size_sub(fragfrom, todo); | 
|---|
| 4274 | todo = 0; | 
|---|
| 4275 |  | 
|---|
| 4276 | to++; | 
|---|
| 4277 | break; | 
|---|
| 4278 | } | 
|---|
| 4279 | } | 
|---|
| 4280 |  | 
|---|
| 4281 | /* Ready to "commit" this state change to tgt */ | 
|---|
| 4282 | skb_shinfo(tgt)->nr_frags = to; | 
|---|
| 4283 |  | 
|---|
| 4284 | if (merge >= 0) { | 
|---|
| 4285 | fragfrom = &skb_shinfo(skb)->frags[0]; | 
|---|
| 4286 | fragto = &skb_shinfo(tgt)->frags[merge]; | 
|---|
| 4287 |  | 
|---|
| 4288 | skb_frag_size_add(fragto, skb_frag_size(fragfrom)); | 
|---|
| 4289 | __skb_frag_unref(fragfrom, skb->pp_recycle); | 
|---|
| 4290 | } | 
|---|
| 4291 |  | 
|---|
| 4292 | /* Reposition in the original skb */ | 
|---|
| 4293 | to = 0; | 
|---|
| 4294 | while (from < skb_shinfo(skb)->nr_frags) | 
|---|
| 4295 | skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; | 
|---|
| 4296 | skb_shinfo(skb)->nr_frags = to; | 
|---|
| 4297 |  | 
|---|
| 4298 | BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); | 
|---|
| 4299 |  | 
|---|
| 4300 | onlymerged: | 
|---|
| 4301 | /* Most likely the tgt won't ever need its checksum anymore, skb on | 
|---|
| 4302 | * the other hand might need it if it needs to be resent | 
|---|
| 4303 | */ | 
|---|
| 4304 | tgt->ip_summed = CHECKSUM_PARTIAL; | 
|---|
| 4305 | skb->ip_summed = CHECKSUM_PARTIAL; | 
|---|
| 4306 |  | 
|---|
| 4307 | skb_len_add(skb, -shiftlen); | 
|---|
| 4308 | skb_len_add(tgt, shiftlen); | 
|---|
| 4309 |  | 
|---|
| 4310 | return shiftlen; | 
|---|
| 4311 | } | 
|---|
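/*
 * Editor's illustration of the collapse pattern used by callers such as
 * TCP's retransmit-queue shifting: if everything moved, the now-empty skb
 * must be unlinked and freed by the caller, as noted above. The queue "q"
 * and its locking are assumed to be handled by the caller.
 */
static void my_try_collapse(struct sk_buff_head *q, struct sk_buff *prev,
			    struct sk_buff *skb)
{
	int shifted = skb_shift(prev, skb, skb->len);

	if (shifted && !skb->len) {
		__skb_unlink(skb, q);
		consume_skb(skb);
	}
}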
| 4312 |  | 
|---|
| 4313 | /** | 
|---|
| 4314 | * skb_prepare_seq_read - Prepare a sequential read of skb data | 
|---|
| 4315 | * @skb: the buffer to read | 
|---|
| 4316 | * @from: lower offset of data to be read | 
|---|
| 4317 | * @to: upper offset of data to be read | 
|---|
| 4318 | * @st: state variable | 
|---|
| 4319 | * | 
|---|
| 4320 | * Initializes the specified state variable. Must be called before | 
|---|
| 4321 | * invoking skb_seq_read() for the first time. | 
|---|
| 4322 | */ | 
|---|
| 4323 | void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, | 
|---|
| 4324 | unsigned int to, struct skb_seq_state *st) | 
|---|
| 4325 | { | 
|---|
| 4326 | st->lower_offset = from; | 
|---|
| 4327 | st->upper_offset = to; | 
|---|
| 4328 | st->root_skb = st->cur_skb = skb; | 
|---|
| 4329 | st->frag_idx = st->stepped_offset = 0; | 
|---|
| 4330 | st->frag_data = NULL; | 
|---|
| 4331 | st->frag_off = 0; | 
|---|
| 4332 | } | 
|---|
| 4333 | EXPORT_SYMBOL(skb_prepare_seq_read); | 
|---|
| 4334 |  | 
|---|
| 4335 | /** | 
|---|
| 4336 | * skb_seq_read - Sequentially read skb data | 
|---|
| 4337 | * @consumed: number of bytes consumed by the caller so far | 
|---|
| 4338 | * @data: destination pointer for data to be returned | 
|---|
| 4339 | * @st: state variable | 
|---|
| 4340 | * | 
|---|
| 4341 | * Reads a block of skb data at @consumed relative to the | 
|---|
| 4342 | * lower offset specified to skb_prepare_seq_read(). Assigns | 
|---|
| 4343 | * the head of the data block to @data and returns the length | 
|---|
| 4344 | * of the block or 0 if the end of the skb data or the upper | 
|---|
| 4345 | * offset has been reached. | 
|---|
| 4346 | * | 
|---|
| 4347 | * The caller is not required to consume all of the data | 
|---|
| 4348 | * returned, i.e. @consumed is typically set to the number | 
|---|
| 4349 | * of bytes already consumed and the next call to | 
|---|
| 4350 | * skb_seq_read() will return the remaining part of the block. | 
|---|
| 4351 | * | 
|---|
| 4352 | * Note 1: The size of each block of data returned can be arbitrary; | 
|---|
| 4353 | *       this limitation is the cost of zerocopy sequential | 
|---|
| 4354 | *       reads of potentially non-linear data. | 
|---|
| 4355 | * | 
|---|
| 4356 | * Note 2: Fragment lists within fragments are not implemented | 
|---|
| 4357 | *       at the moment, state->root_skb could be replaced with | 
|---|
| 4358 | *       a stack for this purpose. | 
|---|
| 4359 | */ | 
|---|
| 4360 | unsigned int skb_seq_read(unsigned int consumed, const u8 **data, | 
|---|
| 4361 | struct skb_seq_state *st) | 
|---|
| 4362 | { | 
|---|
| 4363 | unsigned int block_limit, abs_offset = consumed + st->lower_offset; | 
|---|
| 4364 | skb_frag_t *frag; | 
|---|
| 4365 |  | 
|---|
| 4366 | if (unlikely(abs_offset >= st->upper_offset)) { | 
|---|
| 4367 | if (st->frag_data) { | 
|---|
| 4368 | kunmap_atomic(st->frag_data); | 
|---|
| 4369 | st->frag_data = NULL; | 
|---|
| 4370 | } | 
|---|
| 4371 | return 0; | 
|---|
| 4372 | } | 
|---|
| 4373 |  | 
|---|
| 4374 | next_skb: | 
|---|
| 4375 | block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; | 
|---|
| 4376 |  | 
|---|
| 4377 | if (abs_offset < block_limit && !st->frag_data) { | 
|---|
| 4378 | *data = st->cur_skb->data + (abs_offset - st->stepped_offset); | 
|---|
| 4379 | return block_limit - abs_offset; | 
|---|
| 4380 | } | 
|---|
| 4381 |  | 
|---|
| 4382 | if (!skb_frags_readable(st->cur_skb)) | 
|---|
| 4383 | return 0; | 
|---|
| 4384 |  | 
|---|
| 4385 | if (st->frag_idx == 0 && !st->frag_data) | 
|---|
| 4386 | st->stepped_offset += skb_headlen(st->cur_skb); | 
|---|
| 4387 |  | 
|---|
| 4388 | while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { | 
|---|
| 4389 | unsigned int pg_idx, pg_off, pg_sz; | 
|---|
| 4390 |  | 
|---|
| 4391 | frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; | 
|---|
| 4392 |  | 
|---|
| 4393 | pg_idx = 0; | 
|---|
| 4394 | pg_off = skb_frag_off(frag); | 
|---|
| 4395 | pg_sz = skb_frag_size(frag); | 
|---|
| 4396 |  | 
|---|
| 4397 | if (skb_frag_must_loop(skb_frag_page(frag))) { | 
|---|
| 4398 | pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; | 
|---|
| 4399 | pg_off = offset_in_page(pg_off + st->frag_off); | 
|---|
| 4400 | pg_sz = min_t(unsigned int, pg_sz - st->frag_off, | 
|---|
| 4401 | PAGE_SIZE - pg_off); | 
|---|
| 4402 | } | 
|---|
| 4403 |  | 
|---|
| 4404 | block_limit = pg_sz + st->stepped_offset; | 
|---|
| 4405 | if (abs_offset < block_limit) { | 
|---|
| 4406 | if (!st->frag_data) | 
|---|
| 4407 | st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); | 
|---|
| 4408 |  | 
|---|
| 4409 | *data = (u8 *)st->frag_data + pg_off + | 
|---|
| 4410 | (abs_offset - st->stepped_offset); | 
|---|
| 4411 |  | 
|---|
| 4412 | return block_limit - abs_offset; | 
|---|
| 4413 | } | 
|---|
| 4414 |  | 
|---|
| 4415 | if (st->frag_data) { | 
|---|
| 4416 | kunmap_atomic(st->frag_data); | 
|---|
| 4417 | st->frag_data = NULL; | 
|---|
| 4418 | } | 
|---|
| 4419 |  | 
|---|
| 4420 | st->stepped_offset += pg_sz; | 
|---|
| 4421 | st->frag_off += pg_sz; | 
|---|
| 4422 | if (st->frag_off == skb_frag_size(frag)) { | 
|---|
| 4423 | st->frag_off = 0; | 
|---|
| 4424 | st->frag_idx++; | 
|---|
| 4425 | } | 
|---|
| 4426 | } | 
|---|
| 4427 |  | 
|---|
| 4428 | if (st->frag_data) { | 
|---|
| 4429 | kunmap_atomic(st->frag_data); | 
|---|
| 4430 | st->frag_data = NULL; | 
|---|
| 4431 | } | 
|---|
| 4432 |  | 
|---|
| 4433 | if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { | 
|---|
| 4434 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; | 
|---|
| 4435 | st->frag_idx = 0; | 
|---|
| 4436 | goto next_skb; | 
|---|
| 4437 | } else if (st->cur_skb->next) { | 
|---|
| 4438 | st->cur_skb = st->cur_skb->next; | 
|---|
| 4439 | st->frag_idx = 0; | 
|---|
| 4440 | goto next_skb; | 
|---|
| 4441 | } | 
|---|
| 4442 |  | 
|---|
| 4443 | return 0; | 
|---|
| 4444 | } | 
|---|
| 4445 | EXPORT_SYMBOL(skb_seq_read); | 
|---|
| 4446 |  | 
|---|
| 4447 | /** | 
|---|
| 4448 | * skb_abort_seq_read - Abort a sequential read of skb data | 
|---|
| 4449 | * @st: state variable | 
|---|
| 4450 | * | 
|---|
| 4451 | * Must be called if the read was abandoned before skb_seq_read() | 
|---|
| 4452 | * returned 0. | 
|---|
| 4453 | */ | 
|---|
| 4454 | void skb_abort_seq_read(struct skb_seq_state *st) | 
|---|
| 4455 | { | 
|---|
| 4456 | if (st->frag_data) | 
|---|
| 4457 | kunmap_atomic(st->frag_data); | 
|---|
| 4458 | } | 
|---|
| 4459 | EXPORT_SYMBOL(skb_abort_seq_read); | 
|---|
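/*
 * Editor's illustration: the canonical prepare/read loop for walking
 * possibly non-linear skb data without copying it. Since the loop runs
 * until skb_seq_read() returns 0, skb_abort_seq_read() is not needed.
 */
static unsigned int my_byte_sum(struct sk_buff *skb, unsigned int from,
				unsigned int to)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len, i, sum = 0;
	const u8 *data;

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < len; i++)
			sum += data[i];
		consumed += len;
	}
	return sum;
}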
| 4460 |  | 
|---|
| 4461 | /** | 
|---|
| 4462 | * skb_copy_seq_read() - copy from a skb_seq_state to a buffer | 
|---|
| 4463 | * @st: source skb_seq_state | 
|---|
| 4464 | * @offset: offset in source | 
|---|
| 4465 | * @to: destination buffer | 
|---|
| 4466 | * @len: number of bytes to copy | 
|---|
| 4467 | * | 
|---|
| 4468 | * Copy @len bytes from @offset bytes into the source @st to the destination | 
|---|
| 4469 | * buffer @to. @offset should increase (or be unchanged) with each subsequent | 
|---|
| 4470 | * call to this function. If @offset needs to decrease from the previous call, | 
|---|
| 4471 | * @st should be reset first. | 
|---|
| 4472 | * | 
|---|
| 4473 | * Return: 0 on success or -EINVAL if the copy ended early | 
|---|
| 4474 | */ | 
|---|
| 4475 | int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len) | 
|---|
| 4476 | { | 
|---|
| 4477 | const u8 *data; | 
|---|
| 4478 | u32 sqlen; | 
|---|
| 4479 |  | 
|---|
| 4480 | for (;;) { | 
|---|
| 4481 | sqlen = skb_seq_read(offset, &data, st); | 
|---|
| 4482 | if (sqlen == 0) | 
|---|
| 4483 | return -EINVAL; | 
|---|
| 4484 | if (sqlen >= len) { | 
|---|
| 4485 | memcpy(to, data, len); | 
|---|
| 4486 | return 0; | 
|---|
| 4487 | } | 
|---|
| 4488 | memcpy(to, data, sqlen); | 
|---|
| 4489 | to += sqlen; | 
|---|
| 4490 | offset += sqlen; | 
|---|
| 4491 | len -= sqlen; | 
|---|
| 4492 | } | 
|---|
| 4493 | } | 
|---|
| 4494 | EXPORT_SYMBOL(skb_copy_seq_read); | 
|---|
| 4495 |  | 
|---|
| 4496 | #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb)) | 
|---|
| 4497 |  | 
|---|
| 4498 | static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, | 
|---|
| 4499 | struct ts_config *conf, | 
|---|
| 4500 | struct ts_state *state) | 
|---|
| 4501 | { | 
|---|
| 4502 | return skb_seq_read(offset, text, TS_SKB_CB(state)); | 
|---|
| 4503 | } | 
|---|
| 4504 |  | 
|---|
| 4505 | static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) | 
|---|
| 4506 | { | 
|---|
| 4507 | skb_abort_seq_read(TS_SKB_CB(state)); | 
|---|
| 4508 | } | 
|---|
| 4509 |  | 
|---|
| 4510 | /** | 
|---|
| 4511 | * skb_find_text - Find a text pattern in skb data | 
|---|
| 4512 | * @skb: the buffer to look in | 
|---|
| 4513 | * @from: search offset | 
|---|
| 4514 | * @to: search limit | 
|---|
| 4515 | * @config: textsearch configuration | 
|---|
| 4516 | * | 
|---|
| 4517 | * Finds a pattern in the skb data according to the specified | 
|---|
| 4518 | * textsearch configuration. Use textsearch_next() to retrieve | 
|---|
| 4519 | * subsequent occurrences of the pattern. Returns the offset | 
|---|
| 4520 | * to the first occurrence or UINT_MAX if no match was found. | 
|---|
| 4521 | */ | 
|---|
| 4522 | unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, | 
|---|
| 4523 | unsigned int to, struct ts_config *config) | 
|---|
| 4524 | { | 
|---|
| 4525 | unsigned int patlen = config->ops->get_pattern_len(config); | 
|---|
| 4526 | struct ts_state state; | 
|---|
| 4527 | unsigned int ret; | 
|---|
| 4528 |  | 
|---|
| 4529 | BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); | 
|---|
| 4530 |  | 
|---|
| 4531 | config->get_next_block = skb_ts_get_next_block; | 
|---|
| 4532 | config->finish = skb_ts_finish; | 
|---|
| 4533 |  | 
|---|
| 4534 | skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); | 
|---|
| 4535 |  | 
|---|
| 4536 | ret = textsearch_find(config, &state); | 
|---|
| 4537 | return (ret + patlen <= to - from ? ret : UINT_MAX); | 
|---|
| 4538 | } | 
|---|
| 4539 | EXPORT_SYMBOL(skb_find_text); | 
|---|
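/*
 * Editor's illustration: pairing skb_find_text() with the textsearch API.
 * "bm" (Boyer-Moore) is one of the in-tree algorithms; error handling is
 * deliberately minimal in this sketch.
 */
static bool my_skb_contains(struct sk_buff *skb, const char *pattern)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("bm", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);
	return pos != UINT_MAX;
}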
| 4540 |  | 
|---|
| 4541 | int skb_append_pagefrags(struct sk_buff *skb, struct page *page, | 
|---|
| 4542 | int offset, size_t size, size_t max_frags) | 
|---|
| 4543 | { | 
|---|
| 4544 | int i = skb_shinfo(skb)->nr_frags; | 
|---|
| 4545 |  | 
|---|
| 4546 | if (skb_can_coalesce(skb, i, page, offset)) { | 
|---|
| 4547 | skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); | 
|---|
| 4548 | } else if (i < max_frags) { | 
|---|
| 4549 | skb_zcopy_downgrade_managed(skb); | 
|---|
| 4550 | get_page(page); | 
|---|
| 4551 | skb_fill_page_desc_noacc(skb, i, page, offset, size); | 
|---|
| 4552 | } else { | 
|---|
| 4553 | return -EMSGSIZE; | 
|---|
| 4554 | } | 
|---|
| 4555 |  | 
|---|
| 4556 | return 0; | 
|---|
| 4557 | } | 
|---|
| 4558 | EXPORT_SYMBOL_GPL(skb_append_pagefrags); | 
|---|
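/*
 * Editor's illustration: attaching one page to an skb and handling the
 * -EMSGSIZE overflow case. The len/data_len/truesize accounting mirrors
 * what typical callers do afterwards, but is an assumption of this sketch.
 */
static int my_add_page(struct sk_buff *skb, struct page *page,
		       int off, size_t size)
{
	int err = skb_append_pagefrags(skb, page, off, size, MAX_SKB_FRAGS);

	if (err)	/* -EMSGSIZE: no frag slot left and cannot coalesce */
		return err;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	return 0;
}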
| 4559 |  | 
|---|
| 4560 | /** | 
|---|
| 4561 | *	skb_pull_rcsum - pull skb and update receive checksum | 
|---|
| 4562 | *	@skb: buffer to update | 
|---|
| 4563 | *	@len: length of data pulled | 
|---|
| 4564 | * | 
|---|
| 4565 | *	This function performs an skb_pull on the packet and updates | 
|---|
| 4566 | *	the CHECKSUM_COMPLETE checksum.  It should be used on | 
|---|
| 4567 | *	receive path processing instead of skb_pull unless you know | 
|---|
| 4568 | *	that the checksum difference is zero (e.g., a valid IP header) | 
|---|
| 4569 | *	or you are setting ip_summed to CHECKSUM_NONE. | 
|---|
| 4570 | */ | 
|---|
| 4571 | void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) | 
|---|
| 4572 | { | 
|---|
| 4573 | unsigned char *data = skb->data; | 
|---|
| 4574 |  | 
|---|
| 4575 | BUG_ON(len > skb->len); | 
|---|
| 4576 | __skb_pull(skb, len); | 
|---|
| 4577 | skb_postpull_rcsum(skb, data, len); | 
|---|
| 4578 | return skb->data; | 
|---|
| 4579 | } | 
|---|
| 4580 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); | 
|---|
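/*
 * Editor's illustration: stripping a fixed-size encapsulation header on the
 * receive path while keeping a CHECKSUM_COMPLETE value consistent.
 * MY_HDR_LEN is a made-up constant for this sketch.
 */
#define MY_HDR_LEN 8

static bool my_decap(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, MY_HDR_LEN))
		return false;
	skb_pull_rcsum(skb, MY_HDR_LEN);
	return true;
}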
| 4581 |  | 
|---|
| 4582 | static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) | 
|---|
| 4583 | { | 
|---|
| 4584 | skb_frag_t head_frag; | 
|---|
| 4585 | struct page *page; | 
|---|
| 4586 |  | 
|---|
| 4587 | page = virt_to_head_page(frag_skb->head); | 
|---|
| 4588 | skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - | 
|---|
| 4589 | (unsigned char *)page_address(page), | 
|---|
| 4590 | skb_headlen(frag_skb)); | 
|---|
| 4591 | return head_frag; | 
|---|
| 4592 | } | 
|---|
| 4593 |  | 
|---|
| 4594 | struct sk_buff *skb_segment_list(struct sk_buff *skb, | 
|---|
| 4595 | netdev_features_t features, | 
|---|
| 4596 | unsigned int offset) | 
|---|
| 4597 | { | 
|---|
| 4598 | struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; | 
|---|
| 4599 | unsigned int tnl_hlen = skb_tnl_header_len(skb); | 
|---|
| 4600 | unsigned int delta_truesize = 0; | 
|---|
| 4601 | unsigned int delta_len = 0; | 
|---|
| 4602 | struct sk_buff *tail = NULL; | 
|---|
| 4603 | struct sk_buff *nskb, *tmp; | 
|---|
| 4604 | int len_diff, err; | 
|---|
| 4605 |  | 
|---|
| 4606 | skb_push(skb, -skb_network_offset(skb) + offset); | 
|---|
| 4607 |  | 
|---|
| 4608 | /* Ensure the head is writeable before touching the shared info */ | 
|---|
| 4609 | err = skb_unclone(skb, GFP_ATOMIC); | 
|---|
| 4610 | if (err) | 
|---|
| 4611 | goto err_linearize; | 
|---|
| 4612 |  | 
|---|
| 4613 | skb_shinfo(skb)->frag_list = NULL; | 
|---|
| 4614 |  | 
|---|
| 4615 | while (list_skb) { | 
|---|
| 4616 | nskb = list_skb; | 
|---|
| 4617 | list_skb = list_skb->next; | 
|---|
| 4618 |  | 
|---|
| 4619 | err = 0; | 
|---|
| 4620 | delta_truesize += nskb->truesize; | 
|---|
| 4621 | if (skb_shared(nskb)) { | 
|---|
| 4622 | tmp = skb_clone(nskb, GFP_ATOMIC); | 
|---|
| 4623 | if (tmp) { | 
|---|
| 4624 | consume_skb(nskb); | 
|---|
| 4625 | nskb = tmp; | 
|---|
| 4626 | err = skb_unclone(nskb, GFP_ATOMIC); | 
|---|
| 4627 | } else { | 
|---|
| 4628 | err = -ENOMEM; | 
|---|
| 4629 | } | 
|---|
| 4630 | } | 
|---|
| 4631 |  | 
|---|
| 4632 | if (!tail) | 
|---|
| 4633 | skb->next = nskb; | 
|---|
| 4634 | else | 
|---|
| 4635 | tail->next = nskb; | 
|---|
| 4636 |  | 
|---|
| 4637 | if (unlikely(err)) { | 
|---|
| 4638 | nskb->next = list_skb; | 
|---|
| 4639 | goto err_linearize; | 
|---|
| 4640 | } | 
|---|
| 4641 |  | 
|---|
| 4642 | tail = nskb; | 
|---|
| 4643 |  | 
|---|
| 4644 | delta_len += nskb->len; | 
|---|
| 4645 |  | 
|---|
| 4646 | skb_push(nskb, -skb_network_offset(nskb) + offset); | 
|---|
| 4647 |  | 
|---|
| 4648 | skb_release_head_state(nskb); | 
|---|
| 4649 | len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); | 
|---|
| 4650 | __copy_skb_header(nskb, skb); | 
|---|
| 4651 |  | 
|---|
| 4652 | skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); | 
|---|
| 4653 | nskb->transport_header += len_diff; | 
|---|
| 4654 | skb_copy_from_linear_data_offset(skb, -tnl_hlen, | 
|---|
| 4655 | nskb->data - tnl_hlen, | 
|---|
| 4656 | offset + tnl_hlen); | 
|---|
| 4657 |  | 
|---|
| 4658 | if (skb_needs_linearize(nskb, features) && | 
|---|
| 4659 | __skb_linearize(nskb)) | 
|---|
| 4660 | goto err_linearize; | 
|---|
| 4661 | } | 
|---|
| 4662 |  | 
|---|
| 4663 | skb->truesize = skb->truesize - delta_truesize; | 
|---|
| 4664 | skb->data_len = skb->data_len - delta_len; | 
|---|
| 4665 | skb->len = skb->len - delta_len; | 
|---|
| 4666 |  | 
|---|
| 4667 | skb_gso_reset(skb); | 
|---|
| 4668 |  | 
|---|
| 4669 | skb->prev = tail; | 
|---|
| 4670 |  | 
|---|
| 4671 | if (skb_needs_linearize(skb, features) && | 
|---|
| 4672 | __skb_linearize(skb)) | 
|---|
| 4673 | goto err_linearize; | 
|---|
| 4674 |  | 
|---|
| 4675 | skb_get(skb); | 
|---|
| 4676 |  | 
|---|
| 4677 | return skb; | 
|---|
| 4678 |  | 
|---|
| 4679 | err_linearize: | 
|---|
| 4680 | kfree_skb_list(skb->next); | 
|---|
| 4681 | skb->next = NULL; | 
|---|
| 4682 | return ERR_PTR(-ENOMEM); | 
|---|
| 4683 | } | 
|---|
| 4684 | EXPORT_SYMBOL_GPL(skb_segment_list); | 
|---|
| 4685 |  | 
|---|
| 4686 | /** | 
|---|
| 4687 | *	skb_segment - Perform protocol segmentation on skb. | 
|---|
| 4688 | *	@head_skb: buffer to segment | 
|---|
| 4689 | *	@features: features for the output path (see dev->features) | 
|---|
| 4690 | * | 
|---|
| 4691 | *	This function performs segmentation on the given skb.  It returns | 
|---|
| 4692 | *	a pointer to the first in a list of new skbs for the segments. | 
|---|
| 4693 | *	In case of error it returns ERR_PTR(err). | 
|---|
| 4694 | */ | 
|---|
| 4695 | struct sk_buff *skb_segment(struct sk_buff *head_skb, | 
|---|
| 4696 | netdev_features_t features) | 
|---|
| 4697 | { | 
|---|
| 4698 | struct sk_buff *segs = NULL; | 
|---|
| 4699 | struct sk_buff *tail = NULL; | 
|---|
| 4700 | struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; | 
|---|
| 4701 | unsigned int mss = skb_shinfo(head_skb)->gso_size; | 
|---|
| 4702 | unsigned int doffset = head_skb->data - skb_mac_header(head_skb); | 
|---|
| 4703 | unsigned int offset = doffset; | 
|---|
| 4704 | unsigned int tnl_hlen = skb_tnl_header_len(head_skb); | 
|---|
| 4705 | unsigned int partial_segs = 0; | 
|---|
| 4706 | unsigned int headroom; | 
|---|
| 4707 | unsigned int len = head_skb->len; | 
|---|
| 4708 | struct sk_buff *frag_skb; | 
|---|
| 4709 | skb_frag_t *frag; | 
|---|
| 4710 | __be16 proto; | 
|---|
| 4711 | bool csum, sg; | 
|---|
| 4712 | int err = -ENOMEM; | 
|---|
| 4713 | int i = 0; | 
|---|
| 4714 | int nfrags, pos; | 
|---|
| 4715 |  | 
|---|
| 4716 | if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) && | 
|---|
| 4717 | mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) { | 
|---|
| 4718 | struct sk_buff *check_skb; | 
|---|
| 4719 |  | 
|---|
| 4720 | for (check_skb = list_skb; check_skb; check_skb = check_skb->next) { | 
|---|
| 4721 | if (skb_headlen(check_skb) && !check_skb->head_frag) { | 
|---|
| 4722 | /* gso_size is untrusted, and we have a frag_list with | 
|---|
| 4723 | * a linear non head_frag item. | 
|---|
| 4724 | * | 
|---|
| 4725 | * If head_skb's headlen does not fit requested gso_size, | 
|---|
| 4726 | * it means that the frag_list members do NOT terminate | 
|---|
| 4727 | * on exact gso_size boundaries. Hence we cannot perform | 
|---|
| 4728 | * skb_frag_t page sharing. Therefore we must fallback to | 
|---|
| 4729 | * copying the frag_list skbs; we do so by disabling SG. | 
|---|
| 4730 | */ | 
|---|
| 4731 | features &= ~NETIF_F_SG; | 
|---|
| 4732 | break; | 
|---|
| 4733 | } | 
|---|
| 4734 | } | 
|---|
| 4735 | } | 
|---|
| 4736 |  | 
|---|
| 4737 | __skb_push(head_skb, doffset); | 
|---|
| 4738 | proto = skb_network_protocol(head_skb, NULL); | 
|---|
| 4739 | if (unlikely(!proto)) | 
|---|
| 4740 | return ERR_PTR(-EINVAL); | 
|---|
| 4741 |  | 
|---|
| 4742 | sg = !!(features & NETIF_F_SG); | 
|---|
| 4743 | csum = !!can_checksum_protocol(features, proto); | 
|---|
| 4744 |  | 
|---|
| 4745 | if (sg && csum && (mss != GSO_BY_FRAGS))  { | 
|---|
| 4746 | if (!(features & NETIF_F_GSO_PARTIAL)) { | 
|---|
| 4747 | struct sk_buff *iter; | 
|---|
| 4748 | unsigned int frag_len; | 
|---|
| 4749 |  | 
|---|
| 4750 | if (!list_skb || | 
|---|
| 4751 | !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) | 
|---|
| 4752 | goto normal; | 
|---|
| 4753 |  | 
|---|
| 4754 | /* If we get here then all the required | 
|---|
| 4755 | * GSO features except frag_list are supported. | 
|---|
| 4756 | * Try to split the SKB to multiple GSO SKBs | 
|---|
| 4757 | * with no frag_list. | 
|---|
| 4758 | * Currently we can do that only when the buffers don't | 
|---|
| 4759 | * have a linear part and all the buffers except | 
|---|
| 4760 | * the last are of the same length. | 
|---|
| 4761 | */ | 
|---|
| 4762 | frag_len = list_skb->len; | 
|---|
| 4763 | skb_walk_frags(head_skb, iter) { | 
|---|
| 4764 | if (frag_len != iter->len && iter->next) | 
|---|
| 4765 | goto normal; | 
|---|
| 4766 | if (skb_headlen(iter) && !iter->head_frag) | 
|---|
| 4767 | goto normal; | 
|---|
| 4768 |  | 
|---|
| 4769 | len -= iter->len; | 
|---|
| 4770 | } | 
|---|
| 4771 |  | 
|---|
| 4772 | if (len != frag_len) | 
|---|
| 4773 | goto normal; | 
|---|
| 4774 | } | 
|---|
| 4775 |  | 
|---|
| 4776 | /* GSO partial only requires that we trim off any excess that | 
|---|
| 4777 | * doesn't fit into an MSS sized block, so take care of that | 
|---|
| 4778 | * now. | 
|---|
| 4779 | * Cap len to not accidentally hit GSO_BY_FRAGS. | 
|---|
| 4780 | */ | 
|---|
| 4781 | partial_segs = min(len, GSO_BY_FRAGS - 1) / mss; | 
|---|
| 4782 | if (partial_segs > 1) | 
|---|
| 4783 | mss *= partial_segs; | 
|---|
| 4784 | else | 
|---|
| 4785 | partial_segs = 0; | 
|---|
| 4786 | } | 
|---|
| 4787 |  | 
|---|
| 4788 | normal: | 
|---|
| 4789 | headroom = skb_headroom(head_skb); | 
|---|
| 4790 | pos = skb_headlen(head_skb); | 
|---|
| 4791 |  | 
|---|
| 4792 | if (skb_orphan_frags(head_skb, GFP_ATOMIC)) | 
|---|
| 4793 | return ERR_PTR(-ENOMEM); | 
|---|
| 4794 |  | 
|---|
| 4795 | nfrags = skb_shinfo(head_skb)->nr_frags; | 
|---|
| 4796 | frag = skb_shinfo(head_skb)->frags; | 
|---|
| 4797 | frag_skb = head_skb; | 
|---|
| 4798 |  | 
|---|
| 4799 | do { | 
|---|
| 4800 | struct sk_buff *nskb; | 
|---|
| 4801 | skb_frag_t *nskb_frag; | 
|---|
| 4802 | int hsize; | 
|---|
| 4803 | int size; | 
|---|
| 4804 |  | 
|---|
| 4805 | if (unlikely(mss == GSO_BY_FRAGS)) { | 
|---|
| 4806 | len = list_skb->len; | 
|---|
| 4807 | } else { | 
|---|
| 4808 | len = head_skb->len - offset; | 
|---|
| 4809 | if (len > mss) | 
|---|
| 4810 | len = mss; | 
|---|
| 4811 | } | 
|---|
| 4812 |  | 
|---|
| 4813 | hsize = skb_headlen(head_skb) - offset; | 
|---|
| 4814 |  | 
|---|
| 4815 | if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && | 
|---|
| 4816 | (skb_headlen(list_skb) == len || sg)) { | 
|---|
| 4817 | BUG_ON(skb_headlen(list_skb) > len); | 
|---|
| 4818 |  | 
|---|
| 4819 | nskb = skb_clone(list_skb, GFP_ATOMIC); | 
|---|
| 4820 | if (unlikely(!nskb)) | 
|---|
| 4821 | goto err; | 
|---|
| 4822 |  | 
|---|
| 4823 | i = 0; | 
|---|
| 4824 | nfrags = skb_shinfo(list_skb)->nr_frags; | 
|---|
| 4825 | frag = skb_shinfo(list_skb)->frags; | 
|---|
| 4826 | frag_skb = list_skb; | 
|---|
| 4827 | pos += skb_headlen(list_skb); | 
|---|
| 4828 |  | 
|---|
| 4829 | while (pos < offset + len) { | 
|---|
| 4830 | BUG_ON(i >= nfrags); | 
|---|
| 4831 |  | 
|---|
| 4832 | size = skb_frag_size(frag); | 
|---|
| 4833 | if (pos + size > offset + len) | 
|---|
| 4834 | break; | 
|---|
| 4835 |  | 
|---|
| 4836 | i++; | 
|---|
| 4837 | pos += size; | 
|---|
| 4838 | frag++; | 
|---|
| 4839 | } | 
|---|
| 4840 |  | 
|---|
| 4841 | list_skb = list_skb->next; | 
|---|
| 4842 |  | 
|---|
| 4843 | if (unlikely(pskb_trim(nskb, len))) { | 
|---|
| 4844 | kfree_skb(nskb); | 
|---|
| 4845 | goto err; | 
|---|
| 4846 | } | 
|---|
| 4847 |  | 
|---|
| 4848 | hsize = skb_end_offset(nskb); | 
|---|
| 4849 | if (skb_cow_head(nskb, doffset + headroom)) { | 
|---|
| 4850 | kfree_skb(nskb); | 
|---|
| 4851 | goto err; | 
|---|
| 4852 | } | 
|---|
| 4853 |  | 
|---|
| 4854 | nskb->truesize += skb_end_offset(nskb) - hsize; | 
|---|
| 4855 | skb_release_head_state(nskb); | 
|---|
| 4856 | __skb_push(nskb, doffset); | 
|---|
| 4857 | } else { | 
|---|
| 4858 | if (hsize < 0) | 
|---|
| 4859 | hsize = 0; | 
|---|
| 4860 | if (hsize > len || !sg) | 
|---|
| 4861 | hsize = len; | 
|---|
| 4862 |  | 
|---|
| 4863 | nskb = __alloc_skb(hsize + doffset + headroom, | 
|---|
| 4864 | GFP_ATOMIC, skb_alloc_rx_flag(head_skb), | 
|---|
| 4865 | NUMA_NO_NODE); | 
|---|
| 4866 |  | 
|---|
| 4867 | if (unlikely(!nskb)) | 
|---|
| 4868 | goto err; | 
|---|
| 4869 |  | 
|---|
| 4870 | skb_reserve(nskb, headroom); | 
|---|
| 4871 | __skb_put(nskb, doffset); | 
|---|
| 4872 | } | 
|---|
| 4873 |  | 
|---|
| 4874 | if (segs) | 
|---|
| 4875 | tail->next = nskb; | 
|---|
| 4876 | else | 
|---|
| 4877 | segs = nskb; | 
|---|
| 4878 | tail = nskb; | 
|---|
| 4879 |  | 
|---|
| 4880 | __copy_skb_header(nskb, head_skb); | 
|---|
| 4881 |  | 
|---|
| 4882 | skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); | 
|---|
| 4883 | skb_reset_mac_len(nskb); | 
|---|
| 4884 |  | 
|---|
| 4885 | skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, | 
|---|
| 4886 | nskb->data - tnl_hlen, | 
|---|
| 4887 | doffset + tnl_hlen); | 
|---|
| 4888 |  | 
|---|
| 4889 | if (nskb->len == len + doffset) | 
|---|
| 4890 | goto perform_csum_check; | 
|---|
| 4891 |  | 
|---|
| 4892 | if (!sg) { | 
|---|
| 4893 | if (!csum) { | 
|---|
| 4894 | if (!nskb->remcsum_offload) | 
|---|
| 4895 | nskb->ip_summed = CHECKSUM_NONE; | 
|---|
| 4896 | SKB_GSO_CB(nskb)->csum = | 
|---|
| 4897 | skb_copy_and_csum_bits(head_skb, offset, | 
|---|
| 4898 | skb_put(nskb, | 
|---|
| 4899 | len), | 
|---|
| 4900 | len); | 
|---|
| 4901 | SKB_GSO_CB(nskb)->csum_start = | 
|---|
| 4902 | skb_headroom(nskb) + doffset; | 
|---|
| 4903 | } else { | 
|---|
| 4904 | if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) | 
|---|
| 4905 | goto err; | 
|---|
| 4906 | } | 
|---|
| 4907 | continue; | 
|---|
| 4908 | } | 
|---|
| 4909 |  | 
|---|
| 4910 | nskb_frag = skb_shinfo(nskb)->frags; | 
|---|
| 4911 |  | 
|---|
| 4912 | skb_copy_from_linear_data_offset(head_skb, offset, | 
|---|
| 4913 | skb_put(nskb, hsize), hsize); | 
|---|
| 4914 |  | 
|---|
| 4915 | skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & | 
|---|
| 4916 | SKBFL_SHARED_FRAG; | 
|---|
| 4917 |  | 
|---|
| 4918 | if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) | 
|---|
| 4919 | goto err; | 
|---|
| 4920 |  | 
|---|
| 4921 | while (pos < offset + len) { | 
|---|
| 4922 | if (i >= nfrags) { | 
|---|
| 4923 | if (skb_orphan_frags(list_skb, GFP_ATOMIC) || | 
|---|
| 4924 | skb_zerocopy_clone(nskb, list_skb, | 
|---|
| 4925 | GFP_ATOMIC)) | 
|---|
| 4926 | goto err; | 
|---|
| 4927 |  | 
|---|
| 4928 | i = 0; | 
|---|
| 4929 | nfrags = skb_shinfo(list_skb)->nr_frags; | 
|---|
| 4930 | frag = skb_shinfo(list_skb)->frags; | 
|---|
| 4931 | frag_skb = list_skb; | 
|---|
| 4932 | if (!skb_headlen(list_skb)) { | 
|---|
| 4933 | BUG_ON(!nfrags); | 
|---|
| 4934 | } else { | 
|---|
| 4935 | BUG_ON(!list_skb->head_frag); | 
|---|
| 4936 |  | 
|---|
| 4937 | /* to make room for head_frag. */ | 
|---|
| 4938 | i--; | 
|---|
| 4939 | frag--; | 
|---|
| 4940 | } | 
|---|
| 4941 |  | 
|---|
| 4942 | list_skb = list_skb->next; | 
|---|
| 4943 | } | 
|---|
| 4944 |  | 
|---|
| 4945 | if (unlikely(skb_shinfo(nskb)->nr_frags >= | 
|---|
| 4946 | MAX_SKB_FRAGS)) { | 
|---|
| 4947 | net_warn_ratelimited( | 
|---|
| 4948 | "skb_segment: too many frags: %u %u\n", | 
|---|
| 4949 | pos, mss); | 
|---|
| 4950 | err = -EINVAL; | 
|---|
| 4951 | goto err; | 
|---|
| 4952 | } | 
|---|
| 4953 |  | 
|---|
| 4954 | *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; | 
|---|
| 4955 | __skb_frag_ref(nskb_frag); | 
|---|
| 4956 | size = skb_frag_size(nskb_frag); | 
|---|
| 4957 |  | 
|---|
| 4958 | if (pos < offset) { | 
|---|
| 4959 | skb_frag_off_add(nskb_frag, offset - pos); | 
|---|
| 4960 | skb_frag_size_sub(nskb_frag, offset - pos); | 
|---|
| 4961 | } | 
|---|
| 4962 |  | 
|---|
| 4963 | skb_shinfo(nskb)->nr_frags++; | 
|---|
| 4964 |  | 
|---|
| 4965 | if (pos + size <= offset + len) { | 
|---|
| 4966 | i++; | 
|---|
| 4967 | frag++; | 
|---|
| 4968 | pos += size; | 
|---|
| 4969 | } else { | 
|---|
| 4970 | skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); | 
|---|
| 4971 | goto skip_fraglist; | 
|---|
| 4972 | } | 
|---|
| 4973 |  | 
|---|
| 4974 | nskb_frag++; | 
|---|
| 4975 | } | 
|---|
| 4976 |  | 
|---|
| 4977 | skip_fraglist: | 
|---|
| 4978 | nskb->data_len = len - hsize; | 
|---|
| 4979 | nskb->len += nskb->data_len; | 
|---|
| 4980 | nskb->truesize += nskb->data_len; | 
|---|
| 4981 |  | 
|---|
| 4982 | perform_csum_check: | 
|---|
| 4983 | if (!csum) { | 
|---|
| 4984 | if (skb_has_shared_frag(nskb) && | 
|---|
| 4985 | __skb_linearize(nskb)) | 
|---|
| 4986 | goto err; | 
|---|
| 4987 |  | 
|---|
| 4988 | if (!nskb->remcsum_offload) | 
|---|
| 4989 | nskb->ip_summed = CHECKSUM_NONE; | 
|---|
| 4990 | SKB_GSO_CB(nskb)->csum = | 
|---|
| 4991 | skb_checksum(nskb, doffset, | 
|---|
| 4992 | nskb->len - doffset, 0); | 
|---|
| 4993 | SKB_GSO_CB(nskb)->csum_start = | 
|---|
| 4994 | skb_headroom(nskb) + doffset; | 
|---|
| 4995 | } | 
|---|
| 4996 | } while ((offset += len) < head_skb->len); | 
|---|
| 4997 |  | 
|---|
| 4998 | /* Some callers want to get the end of the list. | 
|---|
| 4999 | * Put it in segs->prev to avoid walking the list. | 
|---|
| 5000 | * (see validate_xmit_skb_list() for example) | 
|---|
| 5001 | */ | 
|---|
| 5002 | segs->prev = tail; | 
|---|
| 5003 |  | 
|---|
| 5004 | if (partial_segs) { | 
|---|
| 5005 | struct sk_buff *iter; | 
|---|
| 5006 | int type = skb_shinfo(head_skb)->gso_type; | 
|---|
| 5007 | unsigned short gso_size = skb_shinfo(head_skb)->gso_size; | 
|---|
| 5008 |  | 
|---|
| 5009 | /* Update type to add partial and then remove dodgy if set */ | 
|---|
| 5010 | type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; | 
|---|
| 5011 | type &= ~SKB_GSO_DODGY; | 
|---|
| 5012 |  | 
|---|
| 5013 | /* Update GSO info and prepare to start updating headers on | 
|---|
| 5014 | * our way back down the stack of protocols. | 
|---|
| 5015 | */ | 
|---|
| 5016 | for (iter = segs; iter; iter = iter->next) { | 
|---|
| 5017 | skb_shinfo(iter)->gso_size = gso_size; | 
|---|
| 5018 | skb_shinfo(iter)->gso_segs = partial_segs; | 
|---|
| 5019 | skb_shinfo(iter)->gso_type = type; | 
|---|
| 5020 | SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; | 
|---|
| 5021 | } | 
|---|
| 5022 |  | 
|---|
| 5023 | if (tail->len - doffset <= gso_size) | 
|---|
| 5024 | skb_shinfo(tail)->gso_size = 0; | 
|---|
| 5025 | else if (tail != segs) | 
|---|
| 5026 | skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); | 
|---|
| 5027 | } | 
|---|
| 5028 |  | 
|---|
| 5029 | /* Following permits correct backpressure, for protocols | 
|---|
| 5030 | * using skb_set_owner_w(). | 
|---|
| 5031 | * The idea is to transfer ownership from head_skb to the last segment. | 
|---|
| 5032 | */ | 
|---|
| 5033 | if (head_skb->destructor == sock_wfree) { | 
|---|
| 5034 | swap(tail->truesize, head_skb->truesize); | 
|---|
| 5035 | swap(tail->destructor, head_skb->destructor); | 
|---|
| 5036 | swap(tail->sk, head_skb->sk); | 
|---|
| 5037 | } | 
|---|
| 5038 | return segs; | 
|---|
| 5039 |  | 
|---|
| 5040 | err: | 
|---|
| 5041 | kfree_skb_list(segs); | 
|---|
| 5042 | return ERR_PTR(err); | 
|---|
| 5043 | } | 
|---|
| 5044 | EXPORT_SYMBOL_GPL(skb_segment); | 
|---|
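/*
 * Editor's illustration: the usual call pattern around skb_segment(), as in
 * GSO offload paths - on success the original skb is consumed and the
 * caller continues with the returned list; on failure the ERR_PTR() is
 * propagated. A sketch, not a complete gso_segment callback.
 */
static struct sk_buff *my_segment(struct sk_buff *skb,
				  netdev_features_t features)
{
	struct sk_buff *segs = skb_segment(skb, features);

	if (IS_ERR(segs))
		return segs;

	consume_skb(skb);	/* the list headed by segs now owns the data */
	return segs;
}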
| 5045 |  | 
|---|
| 5046 | #ifdef CONFIG_SKB_EXTENSIONS | 
|---|
| 5047 | #define SKB_EXT_ALIGN_VALUE	8 | 
|---|
| 5048 | #define SKB_EXT_CHUNKSIZEOF(x)	(ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) | 
|---|
| 5049 |  | 
|---|
| 5050 | static const u8 skb_ext_type_len[] = { | 
|---|
| 5051 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) | 
|---|
| 5052 | [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), | 
|---|
| 5053 | #endif | 
|---|
| 5054 | #ifdef CONFIG_XFRM | 
|---|
| 5055 | [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), | 
|---|
| 5056 | #endif | 
|---|
| 5057 | #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) | 
|---|
| 5058 | [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), | 
|---|
| 5059 | #endif | 
|---|
| 5060 | #if IS_ENABLED(CONFIG_MPTCP) | 
|---|
| 5061 | [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), | 
|---|
| 5062 | #endif | 
|---|
| 5063 | #if IS_ENABLED(CONFIG_MCTP_FLOWS) | 
|---|
| 5064 | [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow), | 
|---|
| 5065 | #endif | 
|---|
| 5066 | #if IS_ENABLED(CONFIG_INET_PSP) | 
|---|
| 5067 | [SKB_EXT_PSP] = SKB_EXT_CHUNKSIZEOF(struct psp_skb_ext), | 
|---|
| 5068 | #endif | 
|---|
| 5069 | }; | 
|---|
| 5070 |  | 
|---|
| 5071 | static __always_inline unsigned int skb_ext_total_length(void) | 
|---|
| 5072 | { | 
|---|
| 5073 | unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext); | 
|---|
| 5074 | int i; | 
|---|
| 5075 |  | 
|---|
| 5076 | for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++) | 
|---|
| 5077 | l += skb_ext_type_len[i]; | 
|---|
| 5078 |  | 
|---|
| 5079 | return l; | 
|---|
| 5080 | } | 
|---|
| 5081 |  | 
|---|
| 5082 | static void skb_extensions_init(void) | 
|---|
| 5083 | { | 
|---|
| 5084 | BUILD_BUG_ON(SKB_EXT_NUM >= 8); | 
|---|
| 5085 | #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL) | 
|---|
| 5086 | BUILD_BUG_ON(skb_ext_total_length() > 255); | 
|---|
| 5087 | #endif | 
|---|
| 5088 |  | 
|---|
| 5089 | skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", | 
|---|
| 5090 | SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), | 
|---|
| 5091 | 0, | 
|---|
| 5092 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, | 
|---|
| 5093 | NULL); | 
|---|
| 5094 | } | 
|---|
| 5095 | #else | 
|---|
| 5096 | static void skb_extensions_init(void) {} | 
|---|
| 5097 | #endif | 
|---|
| 5098 |  | 
|---|
| 5099 | /* The SKB kmem_cache slab is critical for network performance.  Never | 
|---|
| 5100 | * merge/alias the slab with similar sized objects.  This avoids fragmentation | 
|---|
| 5101 | * that hurts performance of kmem_cache_{alloc,free}_bulk APIs. | 
|---|
| 5102 | */ | 
|---|
| 5103 | #ifndef CONFIG_SLUB_TINY | 
|---|
| 5104 | #define FLAG_SKB_NO_MERGE	SLAB_NO_MERGE | 
|---|
| 5105 | #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */ | 
|---|
| 5106 | #define FLAG_SKB_NO_MERGE	0 | 
|---|
| 5107 | #endif | 
|---|
| 5108 |  | 
|---|
| 5109 | void __init skb_init(void) | 
|---|
| 5110 | { | 
|---|
| 5111 | net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache", | 
|---|
| 5112 | sizeof(struct sk_buff), | 
|---|
| 5113 | 0, | 
|---|
| 5114 | SLAB_HWCACHE_ALIGN|SLAB_PANIC| | 
|---|
| 5115 | FLAG_SKB_NO_MERGE, | 
|---|
| 5116 | offsetof(struct sk_buff, cb), | 
|---|
| 5117 | sizeof_field(struct sk_buff, cb), | 
|---|
| 5118 | NULL); | 
|---|
| 5119 | net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", | 
|---|
| 5120 | sizeof(struct sk_buff_fclones), | 
|---|
| 5121 | 0, | 
|---|
| 5122 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, | 
|---|
| 5123 | NULL); | 
|---|
| 5124 | /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. | 
|---|
| 5125 | * struct skb_shared_info is located at the end of skb->head, | 
|---|
| 5126 | * and should not be copied to/from user. | 
|---|
| 5127 | */ | 
|---|
| 5128 | net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head", | 
|---|
| 5129 | SKB_SMALL_HEAD_CACHE_SIZE, | 
|---|
| 5130 | 0, | 
|---|
| 5131 | SLAB_HWCACHE_ALIGN | SLAB_PANIC, | 
|---|
| 5132 | 0, | 
|---|
| 5133 | SKB_SMALL_HEAD_HEADROOM, | 
|---|
| 5134 | NULL); | 
|---|
| 5135 | skb_extensions_init(); | 
|---|
| 5136 | } | 
|---|
| 5137 |  | 
|---|
| 5138 | static int | 
|---|
| 5139 | __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, | 
|---|
| 5140 | unsigned int recursion_level) | 
|---|
| 5141 | { | 
|---|
| 5142 | int start = skb_headlen(skb); | 
|---|
| 5143 | int i, copy = start - offset; | 
|---|
| 5144 | struct sk_buff *frag_iter; | 
|---|
| 5145 | int elt = 0; | 
|---|
| 5146 |  | 
|---|
| 5147 | if (unlikely(recursion_level >= 24)) | 
|---|
| 5148 | return -EMSGSIZE; | 
|---|
| 5149 |  | 
|---|
| 5150 | if (copy > 0) { | 
|---|
| 5151 | if (copy > len) | 
|---|
| 5152 | copy = len; | 
|---|
| 5153 | sg_set_buf(sg, skb->data + offset, copy); | 
|---|
| 5154 | elt++; | 
|---|
| 5155 | if ((len -= copy) == 0) | 
|---|
| 5156 | return elt; | 
|---|
| 5157 | offset += copy; | 
|---|
| 5158 | } | 
|---|
| 5159 |  | 
|---|
| 5160 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 
|---|
| 5161 | int end; | 
|---|
| 5162 |  | 
|---|
| 5163 | WARN_ON(start > offset + len); | 
|---|
| 5164 |  | 
|---|
| 5165 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); | 
|---|
| 5166 | if ((copy = end - offset) > 0) { | 
|---|
| 5167 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 
|---|
| 5168 | if (unlikely(elt && sg_is_last(&sg[elt - 1]))) | 
|---|
| 5169 | return -EMSGSIZE; | 
|---|
| 5170 |  | 
|---|
| 5171 | if (copy > len) | 
|---|
| 5172 | copy = len; | 
|---|
| 5173 | sg_set_page(&sg[elt], skb_frag_page(frag), copy, | 
|---|
| 5174 | skb_frag_off(frag) + offset - start); | 
|---|
| 5175 | elt++; | 
|---|
| 5176 | if (!(len -= copy)) | 
|---|
| 5177 | return elt; | 
|---|
| 5178 | offset += copy; | 
|---|
| 5179 | } | 
|---|
| 5180 | start = end; | 
|---|
| 5181 | } | 
|---|
| 5182 |  | 
|---|
| 5183 | skb_walk_frags(skb, frag_iter) { | 
|---|
| 5184 | int end, ret; | 
|---|
| 5185 |  | 
|---|
| 5186 | WARN_ON(start > offset + len); | 
|---|
| 5187 |  | 
|---|
| 5188 | end = start + frag_iter->len; | 
|---|
| 5189 | if ((copy = end - offset) > 0) { | 
|---|
| 5190 | if (unlikely(elt && sg_is_last(&sg[elt - 1]))) | 
|---|
| 5191 | return -EMSGSIZE; | 
|---|
| 5192 |  | 
|---|
| 5193 | if (copy > len) | 
|---|
| 5194 | copy = len; | 
|---|
| 5195 | ret = __skb_to_sgvec(frag_iter, sg + elt, offset - start, | 
|---|
| 5196 | copy, recursion_level + 1); | 
|---|
| 5197 | if (unlikely(ret < 0)) | 
|---|
| 5198 | return ret; | 
|---|
| 5199 | elt += ret; | 
|---|
| 5200 | if ((len -= copy) == 0) | 
|---|
| 5201 | return elt; | 
|---|
| 5202 | offset += copy; | 
|---|
| 5203 | } | 
|---|
| 5204 | start = end; | 
|---|
| 5205 | } | 
|---|
| 5206 | BUG_ON(len); | 
|---|
| 5207 | return elt; | 
|---|
| 5208 | } | 
|---|
| 5209 |  | 
|---|
| 5210 | /** | 
|---|
| 5211 | *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer | 
|---|
| 5212 | *	@skb: Socket buffer containing the buffers to be mapped | 
|---|
| 5213 | *	@sg: The scatter-gather list to map into | 
|---|
| 5214 | *	@offset: The offset into the buffer's contents to start mapping | 
|---|
| 5215 | *	@len: Length of buffer space to be mapped | 
|---|
| 5216 | * | 
|---|
| 5217 | *	Fill the specified scatter-gather list with mappings/pointers into a | 
|---|
| 5218 | *	region of the buffer space attached to a socket buffer. Returns either | 
|---|
| 5219 | *	the number of scatterlist items used, or -EMSGSIZE if the contents | 
|---|
| 5220 | *	could not fit. | 
|---|
| 5221 | */ | 
|---|
| 5222 | int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | 
|---|
| 5223 | { | 
|---|
| 5224 | int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); | 
|---|
| 5225 |  | 
|---|
| 5226 | if (nsg <= 0) | 
|---|
| 5227 | return nsg; | 
|---|
| 5228 |  | 
|---|
| 5229 | sg_mark_end(&sg[nsg - 1]); | 
|---|
| 5230 |  | 
|---|
| 5231 | return nsg; | 
|---|
| 5232 | } | 
|---|
| 5233 | EXPORT_SYMBOL_GPL(skb_to_sgvec); | 
|---|
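/*
 * Editor's illustration: mapping skb payload for DMA/crypto in the style of
 * IPsec callers. The on-stack table below only covers the linear part plus
 * MAX_SKB_FRAGS fragments; buffers with frag lists may need more entries.
 */
static int my_map_payload(struct sk_buff *skb, int offset, int len)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nsg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nsg = skb_to_sgvec(skb, sg, offset, len);
	if (nsg < 0)
		return nsg;	/* -EMSGSIZE: table too small */

	/* sg[0..nsg-1] now reference the skb data. */
	return nsg;
}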
| 5234 |  | 
|---|
| 5235 | /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the | 
|---|
| 5236 | * given sglist, without marking the sg entry containing the last skb data as | 
|---|
| 5237 | * the end. So the caller can manipulate the sg list at will when padding new | 
|---|
| 5238 | * data after the first call, without calling sg_unmark_end to extend the list. | 
|---|
| 5239 | * | 
|---|
| 5240 | * Scenario to use skb_to_sgvec_nomark: | 
|---|
| 5241 | * 1. sg_init_table | 
|---|
| 5242 | * 2. skb_to_sgvec_nomark(payload1) | 
|---|
| 5243 | * 3. skb_to_sgvec_nomark(payload2) | 
|---|
| 5244 | * | 
|---|
| 5245 | * This is equivalent to: | 
|---|
| 5246 | * 1. sg_init_table | 
|---|
| 5247 | * 2. skb_to_sgvec(payload1) | 
|---|
| 5248 | * 3. sg_unmark_end | 
|---|
| 5249 | * 4. skb_to_sgvec(payload2) | 
|---|
| 5250 | * | 
|---|
| 5251 | * When mapping multiple payloads conditionally, skb_to_sgvec_nomark | 
|---|
| 5252 | * is preferable. | 
|---|
| 5253 | */ | 
|---|
| 5254 | int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, | 
|---|
| 5255 | int offset, int len) | 
|---|
| 5256 | { | 
|---|
| 5257 | return __skb_to_sgvec(skb, sg, offset, len, 0); | 
|---|
| 5258 | } | 
|---|
| 5259 | EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); | 
|---|
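/*
 * Editor's illustration of the two-payload scenario from the comment above:
 * map payload1 and payload2 into one table, then terminate it once by hand.
 */
static int my_map_two(struct sk_buff *p1, struct sk_buff *p2,
		      struct scatterlist *sg, int nents)
{
	int n1, n2;

	sg_init_table(sg, nents);
	n1 = skb_to_sgvec_nomark(p1, sg, 0, p1->len);
	if (n1 < 0)
		return n1;
	n2 = skb_to_sgvec_nomark(p2, sg + n1, 0, p2->len);
	if (n2 < 0)
		return n2;
	sg_mark_end(&sg[n1 + n2 - 1]);
	return n1 + n2;
}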
| 5260 |  | 
|---|
| 5261 |  | 
|---|
| 5262 |  | 
|---|
| 5263 | /** | 
|---|
| 5264 | *	skb_cow_data - Check that a socket buffer's data buffers are writable | 
|---|
| 5265 | *	@skb: The socket buffer to check. | 
|---|
| 5266 | *	@tailbits: Amount of trailing space to be added | 
|---|
| 5267 | *	@trailer: Returned pointer to the skb where the @tailbits space begins | 
|---|
| 5268 | * | 
|---|
| 5269 | *	Make sure that the data buffers attached to a socket buffer are | 
|---|
| 5270 | *	writable. If they are not, private copies are made of the data buffers | 
|---|
| 5271 | *	and the socket buffer is set to use these instead. | 
|---|
| 5272 | * | 
|---|
| 5273 | *	If @tailbits is given, make sure that there is space to write @tailbits | 
|---|
| 5274 | *	bytes of data beyond current end of socket buffer.  @trailer will be | 
|---|
| 5275 | *	set to point to the skb in which this space begins. | 
|---|
| 5276 | * | 
|---|
| 5277 | *	The number of scatterlist elements required to completely map the | 
|---|
| 5278 | *	COW'd and extended socket buffer will be returned. | 
|---|
| 5279 | */ | 
|---|
| 5280 | int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | 
|---|
| 5281 | { | 
|---|
| 5282 | int copyflag; | 
|---|
| 5283 | int elt; | 
|---|
| 5284 | struct sk_buff *skb1, **skb_p; | 
|---|
| 5285 |  | 
|---|
| 5286 | /* If skb is cloned or its head is paged, reallocate | 
|---|
| 5287 | * head pulling out all the pages (pages are considered not writable | 
|---|
| 5288 | * at the moment even if they are anonymous). | 
|---|
| 5289 | */ | 
|---|
| 5290 | if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && | 
|---|
| 5291 | !__pskb_pull_tail(skb, __skb_pagelen(skb))) | 
|---|
| 5292 | return -ENOMEM; | 
|---|
| 5293 |  | 
|---|
| 5294 | /* Easy case. Most of packets will go this way. */ | 
|---|
| 5295 | if (!skb_has_frag_list(skb)) { | 
|---|
| 5296 | /* A little trouble: not enough space for the trailer. | 
|---|
| 5297 | * This should not happen when the stack is tuned to generate | 
|---|
| 5298 | * good frames. On a miss we reallocate and reserve even more | 
|---|
| 5299 | * space; 128 bytes is fair. */ | 
|---|
| 5300 |  | 
|---|
| 5301 | if (skb_tailroom(skb) < tailbits && | 
|---|
| 5302 | pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) | 
|---|
| 5303 | return -ENOMEM; | 
|---|
| 5304 |  | 
|---|
| 5305 | /* Voila! */ | 
|---|
| 5306 | *trailer = skb; | 
|---|
| 5307 | return 1; | 
|---|
| 5308 | } | 
|---|
| 5309 |  | 
|---|
| 5310 | /* Misery. We are in trouble, going to mince the fragments... */ | 
|---|
| 5311 |  | 
|---|
| 5312 | elt = 1; | 
|---|
| 5313 | skb_p = &skb_shinfo(skb)->frag_list; | 
|---|
| 5314 | copyflag = 0; | 
|---|
| 5315 |  | 
|---|
| 5316 | while ((skb1 = *skb_p) != NULL) { | 
|---|
| 5317 | int ntail = 0; | 
|---|
| 5318 |  | 
|---|
| 5319 | /* The fragment is partially pulled by someone, | 
|---|
| 5320 | * this can happen on input. Copy it and everything | 
|---|
| 5321 | * after it. */ | 
|---|
| 5322 |  | 
|---|
| 5323 | if (skb_shared(skb1)) | 
|---|
| 5324 | copyflag = 1; | 
|---|
| 5325 |  | 
|---|
| 5326 | /* If the skb is the last, worry about trailer. */ | 
|---|
| 5327 |  | 
|---|
| 5328 | if (skb1->next == NULL && tailbits) { | 
|---|
| 5329 | if (skb_shinfo(skb1)->nr_frags || | 
|---|
| 5330 | skb_has_frag_list(skb1) || | 
|---|
| 5331 | skb_tailroom(skb1) < tailbits) | 
|---|
| 5332 | ntail = tailbits + 128; | 
|---|
| 5333 | } | 
|---|
| 5334 |  | 
|---|
| 5335 | if (copyflag || | 
|---|
| 5336 | skb_cloned(skb1) || | 
|---|
| 5337 | ntail || | 
|---|
| 5338 | skb_shinfo(skb1)->nr_frags || | 
|---|
| 5339 | skb_has_frag_list(skb1)) { | 
|---|
| 5340 | struct sk_buff *skb2; | 
|---|
| 5341 |  | 
|---|
| 5342 | /* Out of luck, we have to copy the whole thing... */ | 
|---|
| 5343 | if (ntail == 0) | 
|---|
| 5344 | skb2 = skb_copy(skb1, GFP_ATOMIC); | 
|---|
| 5345 | else | 
|---|
| 5346 | skb2 = skb_copy_expand(skb1, | 
|---|
| 5347 | skb_headroom(skb1), | 
|---|
| 5348 | ntail, | 
|---|
| 5349 | GFP_ATOMIC); | 
|---|
| 5350 | if (unlikely(skb2 == NULL)) | 
|---|
| 5351 | return -ENOMEM; | 
|---|
| 5352 |  | 
|---|
| 5353 | if (skb1->sk) | 
|---|
| 5354 | skb_set_owner_w(skb2, skb1->sk); | 
|---|
| 5355 |  | 
|---|
| 5356 | /* Looking around. Are we still alive? | 
|---|
| 5357 | * OK, link the new skb, drop the old one */ | 
|---|
| 5358 |  | 
|---|
| 5359 | skb2->next = skb1->next; | 
|---|
| 5360 | *skb_p = skb2; | 
|---|
| 5361 | kfree_skb(skb1); | 
|---|
| 5362 | skb1 = skb2; | 
|---|
| 5363 | } | 
|---|
| 5364 | elt++; | 
|---|
| 5365 | *trailer = skb1; | 
|---|
| 5366 | skb_p = &skb1->next; | 
|---|
| 5367 | } | 
|---|
| 5368 |  | 
|---|
| 5369 | return elt; | 
|---|
| 5370 | } | 
|---|
| 5371 | EXPORT_SYMBOL_GPL(skb_cow_data); | 
|---|
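|  |  | 
|---|
|  | /* Illustrative sketch, modeled on IPsec-style callers ("tailen" and "sg" are | 
|---|
|  |  * hypothetical caller-provided values): COW the data, commit the reserved | 
|---|
|  |  * trailer space, then map the whole buffer: | 
|---|
|  |  * | 
|---|
|  |  *	struct sk_buff *trailer; | 
|---|
|  |  *	int nfrags = skb_cow_data(skb, tailen, &trailer); | 
|---|
|  |  * | 
|---|
|  |  *	if (nfrags < 0) | 
|---|
|  |  *		return nfrags; | 
|---|
|  |  *	pskb_put(skb, trailer, tailen); | 
|---|
|  |  *	sg_init_table(sg, nfrags); | 
|---|
|  |  *	skb_to_sgvec(skb, sg, 0, skb->len); | 
|---|
|  |  */ | 
|---|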
| 5372 |  | 
|---|
| 5373 | static void sock_rmem_free(struct sk_buff *skb) | 
|---|
| 5374 | { | 
|---|
| 5375 | struct sock *sk = skb->sk; | 
|---|
| 5376 |  | 
|---|
| 5377 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); | 
|---|
| 5378 | } | 
|---|
| 5379 |  | 
|---|
| 5380 | static void skb_set_err_queue(struct sk_buff *skb) | 
|---|
| 5381 | { | 
|---|
| 5382 | /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. | 
|---|
| 5383 | * So, it is safe to (mis)use it to mark skbs on the error queue. | 
|---|
| 5384 | */ | 
|---|
| 5385 | skb->pkt_type = PACKET_OUTGOING; | 
|---|
| 5386 | BUILD_BUG_ON(PACKET_OUTGOING == 0); | 
|---|
| 5387 | } | 
|---|
| 5388 |  | 
|---|
| 5389 | /* | 
|---|
| 5390 | * Note: We don't mem charge error packets (no sk_forward_alloc changes) | 
|---|
| 5391 | */ | 
|---|
| 5392 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) | 
|---|
| 5393 | { | 
|---|
| 5394 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | 
|---|
| 5395 | (unsigned int)READ_ONCE(sk->sk_rcvbuf)) | 
|---|
| 5396 | return -ENOMEM; | 
|---|
| 5397 |  | 
|---|
| 5398 | skb_orphan(skb); | 
|---|
| 5399 | skb->sk = sk; | 
|---|
| 5400 | skb->destructor = sock_rmem_free; | 
|---|
| 5401 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); | 
|---|
| 5402 | skb_set_err_queue(skb); | 
|---|
| 5403 |  | 
|---|
| 5404 | /* before exiting rcu section, make sure dst is refcounted */ | 
|---|
| 5405 | skb_dst_force(skb); | 
|---|
| 5406 |  | 
|---|
| 5407 | skb_queue_tail(&sk->sk_error_queue, skb); | 
|---|
| 5408 | if (!sock_flag(sk, SOCK_DEAD)) | 
|---|
| 5409 | sk_error_report(sk); | 
|---|
| 5410 | return 0; | 
|---|
| 5411 | } | 
|---|
| 5412 | EXPORT_SYMBOL(sock_queue_err_skb); | 
|---|
| 5413 |  | 
|---|
| 5414 | static bool is_icmp_err_skb(const struct sk_buff *skb) | 
|---|
| 5415 | { | 
|---|
| 5416 | return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || | 
|---|
| 5417 | SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); | 
|---|
| 5418 | } | 
|---|
| 5419 |  | 
|---|
| 5420 | struct sk_buff *sock_dequeue_err_skb(struct sock *sk) | 
|---|
| 5421 | { | 
|---|
| 5422 | struct sk_buff_head *q = &sk->sk_error_queue; | 
|---|
| 5423 | struct sk_buff *skb, *skb_next = NULL; | 
|---|
| 5424 | bool icmp_next = false; | 
|---|
| 5425 | unsigned long flags; | 
|---|
| 5426 |  | 
|---|
| 5427 | if (skb_queue_empty_lockless(q)) | 
|---|
| 5428 | return NULL; | 
|---|
| 5429 |  | 
|---|
| 5430 | spin_lock_irqsave(&q->lock, flags); | 
|---|
| 5431 | skb = __skb_dequeue(q); | 
|---|
| 5432 | if (skb && (skb_next = skb_peek(q))) { | 
|---|
| 5433 | icmp_next = is_icmp_err_skb(skb_next); | 
|---|
| 5434 | if (icmp_next) | 
|---|
| 5435 | sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; | 
|---|
| 5436 | } | 
|---|
| 5437 | spin_unlock_irqrestore(&q->lock, flags); | 
|---|
| 5438 |  | 
|---|
| 5439 | if (is_icmp_err_skb(skb) && !icmp_next) | 
|---|
| 5440 | sk->sk_err = 0; | 
|---|
| 5441 |  | 
|---|
| 5442 | if (skb_next) | 
|---|
| 5443 | sk_error_report(sk); | 
|---|
| 5444 |  | 
|---|
| 5445 | return skb; | 
|---|
| 5446 | } | 
|---|
| 5447 | EXPORT_SYMBOL(sock_dequeue_err_skb); | 
|---|
| 5448 |  | 
|---|
| 5449 | /** | 
|---|
| 5450 | * skb_clone_sk - create clone of skb, and take reference to socket | 
|---|
| 5451 | * @skb: the skb to clone | 
|---|
| 5452 | * | 
|---|
| 5453 | * This function creates a clone of a buffer that holds a reference on | 
|---|
| 5454 | * sk_refcnt.  Buffers created via this function are meant to be | 
|---|
| 5455 | * returned using sock_queue_err_skb, or freed via kfree_skb. | 
|---|
| 5456 | * | 
|---|
| 5457 | * When passing buffers allocated with this function to sock_queue_err_skb | 
|---|
| 5458 | * it is necessary to wrap the call with sock_hold/sock_put in order to | 
|---|
| 5459 | * prevent the socket from being released prior to being enqueued on | 
|---|
| 5460 | * the sk_error_queue. | 
|---|
| 5461 | */ | 
|---|
| 5462 | struct sk_buff *skb_clone_sk(struct sk_buff *skb) | 
|---|
| 5463 | { | 
|---|
| 5464 | struct sock *sk = skb->sk; | 
|---|
| 5465 | struct sk_buff *clone; | 
|---|
| 5466 |  | 
|---|
| 5467 | if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) | 
|---|
| 5468 | return NULL; | 
|---|
| 5469 |  | 
|---|
| 5470 | clone = skb_clone(skb, GFP_ATOMIC); | 
|---|
| 5471 | if (!clone) { | 
|---|
| 5472 | sock_put(sk); | 
|---|
| 5473 | return NULL; | 
|---|
| 5474 | } | 
|---|
| 5475 |  | 
|---|
| 5476 | clone->sk = sk; | 
|---|
| 5477 | clone->destructor = sock_efree; | 
|---|
| 5478 |  | 
|---|
| 5479 | return clone; | 
|---|
| 5480 | } | 
|---|
| 5481 | EXPORT_SYMBOL(skb_clone_sk); | 
|---|
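|  |  | 
|---|
|  | /* Illustrative driver-side sketch: clone at transmit time, then complete the | 
|---|
|  |  * timestamp once the hardware reports it ("hwts" is a hypothetical | 
|---|
|  |  * driver-filled struct skb_shared_hwtstamps): | 
|---|
|  |  * | 
|---|
|  |  *	clone = skb_clone_sk(skb);	(takes its own socket reference) | 
|---|
|  |  *	... transmit skb, later obtain the hardware timestamp ... | 
|---|
|  |  *	if (clone) | 
|---|
|  |  *		skb_complete_tx_timestamp(clone, &hwts); | 
|---|
|  |  */ | 
|---|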
| 5482 |  | 
|---|
| 5483 | static void __skb_complete_tx_timestamp(struct sk_buff *skb, | 
|---|
| 5484 | struct sock *sk, | 
|---|
| 5485 | int tstype, | 
|---|
| 5486 | bool opt_stats) | 
|---|
| 5487 | { | 
|---|
| 5488 | struct sock_exterr_skb *serr; | 
|---|
| 5489 | int err; | 
|---|
| 5490 |  | 
|---|
| 5491 | BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); | 
|---|
| 5492 |  | 
|---|
| 5493 | serr = SKB_EXT_ERR(skb); | 
|---|
| 5494 | memset(serr, 0, sizeof(*serr)); | 
|---|
| 5495 | serr->ee.ee_errno = ENOMSG; | 
|---|
| 5496 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; | 
|---|
| 5497 | serr->ee.ee_info = tstype; | 
|---|
| 5498 | serr->opt_stats = opt_stats; | 
|---|
| 5499 | serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; | 
|---|
| 5500 | if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) { | 
|---|
| 5501 | serr->ee.ee_data = skb_shinfo(skb)->tskey; | 
|---|
| 5502 | if (sk_is_tcp(sk)) | 
|---|
| 5503 | serr->ee.ee_data -= atomic_read(&sk->sk_tskey); | 
|---|
| 5504 | } | 
|---|
| 5505 |  | 
|---|
| 5506 | err = sock_queue_err_skb(sk, skb); | 
|---|
| 5507 |  | 
|---|
| 5508 | if (err) | 
|---|
| 5509 | kfree_skb(skb); | 
|---|
| 5510 | } | 
|---|
| 5511 |  | 
|---|
| 5512 | static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) | 
|---|
| 5513 | { | 
|---|
| 5514 | bool ret; | 
|---|
| 5515 |  | 
|---|
| 5516 | if (likely(tsonly || READ_ONCE(sock_net(sk)->core.sysctl_tstamp_allow_data))) | 
|---|
| 5517 | return true; | 
|---|
| 5518 |  | 
|---|
| 5519 | read_lock_bh(&sk->sk_callback_lock); | 
|---|
| 5520 | ret = sk->sk_socket && sk->sk_socket->file && | 
|---|
| 5521 | file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); | 
|---|
| 5522 | read_unlock_bh(&sk->sk_callback_lock); | 
|---|
| 5523 | return ret; | 
|---|
| 5524 | } | 
|---|
| 5525 |  | 
|---|
| 5526 | void skb_complete_tx_timestamp(struct sk_buff *skb, | 
|---|
| 5527 | struct skb_shared_hwtstamps *hwtstamps) | 
|---|
| 5528 | { | 
|---|
| 5529 | struct sock *sk = skb->sk; | 
|---|
| 5530 |  | 
|---|
| 5531 | if (!skb_may_tx_timestamp(sk, false)) | 
|---|
| 5532 | goto err; | 
|---|
| 5533 |  | 
|---|
| 5534 | /* Take a reference to prevent skb_orphan() from freeing the socket, | 
|---|
| 5535 | * but only if the socket refcount is not zero. | 
|---|
| 5536 | */ | 
|---|
| 5537 | if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { | 
|---|
| 5538 | *skb_hwtstamps(skb) = *hwtstamps; | 
|---|
| 5539 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); | 
|---|
| 5540 | sock_put(sk); | 
|---|
| 5541 | return; | 
|---|
| 5542 | } | 
|---|
| 5543 |  | 
|---|
| 5544 | err: | 
|---|
| 5545 | kfree_skb(skb); | 
|---|
| 5546 | } | 
|---|
| 5547 | EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); | 
|---|
| 5548 |  | 
|---|
| 5549 | static bool skb_tstamp_tx_report_so_timestamping(struct sk_buff *skb, | 
|---|
| 5550 | struct skb_shared_hwtstamps *hwtstamps, | 
|---|
| 5551 | int tstype) | 
|---|
| 5552 | { | 
|---|
| 5553 | switch (tstype) { | 
|---|
| 5554 | case SCM_TSTAMP_SCHED: | 
|---|
| 5555 | return skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP; | 
|---|
| 5556 | case SCM_TSTAMP_SND: | 
|---|
| 5557 | return skb_shinfo(skb)->tx_flags & (hwtstamps ? SKBTX_HW_TSTAMP_NOBPF : | 
|---|
| 5558 | SKBTX_SW_TSTAMP); | 
|---|
| 5559 | case SCM_TSTAMP_ACK: | 
|---|
| 5560 | return TCP_SKB_CB(skb)->txstamp_ack & TSTAMP_ACK_SK; | 
|---|
| 5561 | case SCM_TSTAMP_COMPLETION: | 
|---|
| 5562 | return skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP; | 
|---|
| 5563 | } | 
|---|
| 5564 |  | 
|---|
| 5565 | return false; | 
|---|
| 5566 | } | 
|---|
| 5567 |  | 
|---|
| 5568 | static void skb_tstamp_tx_report_bpf_timestamping(struct sk_buff *skb, | 
|---|
| 5569 | struct skb_shared_hwtstamps *hwtstamps, | 
|---|
| 5570 | struct sock *sk, | 
|---|
| 5571 | int tstype) | 
|---|
| 5572 | { | 
|---|
| 5573 | int op; | 
|---|
| 5574 |  | 
|---|
| 5575 | switch (tstype) { | 
|---|
| 5576 | case SCM_TSTAMP_SCHED: | 
|---|
| 5577 | op = BPF_SOCK_OPS_TSTAMP_SCHED_CB; | 
|---|
| 5578 | break; | 
|---|
| 5579 | case SCM_TSTAMP_SND: | 
|---|
| 5580 | if (hwtstamps) { | 
|---|
| 5581 | op = BPF_SOCK_OPS_TSTAMP_SND_HW_CB; | 
|---|
| 5582 | *skb_hwtstamps(skb) = *hwtstamps; | 
|---|
| 5583 | } else { | 
|---|
| 5584 | op = BPF_SOCK_OPS_TSTAMP_SND_SW_CB; | 
|---|
| 5585 | } | 
|---|
| 5586 | break; | 
|---|
| 5587 | case SCM_TSTAMP_ACK: | 
|---|
| 5588 | op = BPF_SOCK_OPS_TSTAMP_ACK_CB; | 
|---|
| 5589 | break; | 
|---|
| 5590 | default: | 
|---|
| 5591 | return; | 
|---|
| 5592 | } | 
|---|
| 5593 |  | 
|---|
| 5594 | bpf_skops_tx_timestamping(sk, skb, op); | 
|---|
| 5595 | } | 
|---|
| 5596 |  | 
|---|
| 5597 | void __skb_tstamp_tx(struct sk_buff *orig_skb, | 
|---|
| 5598 | const struct sk_buff *ack_skb, | 
|---|
| 5599 | struct skb_shared_hwtstamps *hwtstamps, | 
|---|
| 5600 | struct sock *sk, int tstype) | 
|---|
| 5601 | { | 
|---|
| 5602 | struct sk_buff *skb; | 
|---|
| 5603 | bool tsonly, opt_stats = false; | 
|---|
| 5604 | u32 tsflags; | 
|---|
| 5605 |  | 
|---|
| 5606 | if (!sk) | 
|---|
| 5607 | return; | 
|---|
| 5608 |  | 
|---|
| 5609 | if (skb_shinfo(orig_skb)->tx_flags & SKBTX_BPF) | 
|---|
| 5610 | skb_tstamp_tx_report_bpf_timestamping(orig_skb, hwtstamps, | 
|---|
| 5611 | sk, tstype); | 
|---|
| 5612 |  | 
|---|
| 5613 | if (!skb_tstamp_tx_report_so_timestamping(orig_skb, hwtstamps, tstype)) | 
|---|
| 5614 | return; | 
|---|
| 5615 |  | 
|---|
| 5616 | tsflags = READ_ONCE(sk->sk_tsflags); | 
|---|
| 5617 | if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && | 
|---|
| 5618 | skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) | 
|---|
| 5619 | return; | 
|---|
| 5620 |  | 
|---|
| 5621 | tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY; | 
|---|
| 5622 | if (!skb_may_tx_timestamp(sk, tsonly)) | 
|---|
| 5623 | return; | 
|---|
| 5624 |  | 
|---|
| 5625 | if (tsonly) { | 
|---|
| 5626 | #ifdef CONFIG_INET | 
|---|
| 5627 | if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) && | 
|---|
| 5628 | sk_is_tcp(sk)) { | 
|---|
| 5629 | skb = tcp_get_timestamping_opt_stats(sk, orig_skb, | 
|---|
| 5630 | ack_skb); | 
|---|
| 5631 | opt_stats = true; | 
|---|
| 5632 | } else | 
|---|
| 5633 | #endif | 
|---|
| 5634 | skb = alloc_skb(0, GFP_ATOMIC); | 
|---|
| 5635 | } else { | 
|---|
| 5636 | skb = skb_clone(orig_skb, GFP_ATOMIC); | 
|---|
| 5637 |  | 
|---|
| 5638 | if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { | 
|---|
| 5639 | kfree_skb(skb); | 
|---|
| 5640 | return; | 
|---|
| 5641 | } | 
|---|
| 5642 | } | 
|---|
| 5643 | if (!skb) | 
|---|
| 5644 | return; | 
|---|
| 5645 |  | 
|---|
| 5646 | if (tsonly) { | 
|---|
| 5647 | skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & | 
|---|
| 5648 | SKBTX_ANY_TSTAMP; | 
|---|
| 5649 | skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; | 
|---|
| 5650 | } | 
|---|
| 5651 |  | 
|---|
| 5652 | if (hwtstamps) | 
|---|
| 5653 | *skb_hwtstamps(skb) = *hwtstamps; | 
|---|
| 5654 | else | 
|---|
| 5655 | __net_timestamp(skb); | 
|---|
| 5656 |  | 
|---|
| 5657 | __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); | 
|---|
| 5658 | } | 
|---|
| 5659 | EXPORT_SYMBOL_GPL(__skb_tstamp_tx); | 
|---|
| 5660 |  | 
|---|
| 5661 | void skb_tstamp_tx(struct sk_buff *orig_skb, | 
|---|
| 5662 | struct skb_shared_hwtstamps *hwtstamps) | 
|---|
| 5663 | { | 
|---|
| 5664 | return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, | 
|---|
| 5665 | SCM_TSTAMP_SND); | 
|---|
| 5666 | } | 
|---|
| 5667 | EXPORT_SYMBOL_GPL(skb_tstamp_tx); | 
|---|
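|  |  | 
|---|
|  | /* Illustrative sketch: a driver reporting a hardware TX timestamp from its | 
|---|
|  |  * completion path ("ns" is a hypothetical value read from the NIC): | 
|---|
|  |  * | 
|---|
|  |  *	struct skb_shared_hwtstamps hwts = {}; | 
|---|
|  |  * | 
|---|
|  |  *	hwts.hwtstamp = ns_to_ktime(ns); | 
|---|
|  |  *	skb_tstamp_tx(skb, &hwts); | 
|---|
|  |  */ | 
|---|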
| 5668 |  | 
|---|
| 5669 | #ifdef CONFIG_WIRELESS | 
|---|
| 5670 | void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) | 
|---|
| 5671 | { | 
|---|
| 5672 | struct sock *sk = skb->sk; | 
|---|
| 5673 | struct sock_exterr_skb *serr; | 
|---|
| 5674 | int err = 1; | 
|---|
| 5675 |  | 
|---|
| 5676 | skb->wifi_acked_valid = 1; | 
|---|
| 5677 | skb->wifi_acked = acked; | 
|---|
| 5678 |  | 
|---|
| 5679 | serr = SKB_EXT_ERR(skb); | 
|---|
| 5680 | memset(serr, 0, sizeof(*serr)); | 
|---|
| 5681 | serr->ee.ee_errno = ENOMSG; | 
|---|
| 5682 | serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; | 
|---|
| 5683 |  | 
|---|
| 5684 | /* Take a reference to prevent skb_orphan() from freeing the socket, | 
|---|
| 5685 | * but only if the socket refcount is not zero. | 
|---|
| 5686 | */ | 
|---|
| 5687 | if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { | 
|---|
| 5688 | err = sock_queue_err_skb(sk, skb); | 
|---|
| 5689 | sock_put(sk); | 
|---|
| 5690 | } | 
|---|
| 5691 | if (err) | 
|---|
| 5692 | kfree_skb(skb); | 
|---|
| 5693 | } | 
|---|
| 5694 | EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); | 
|---|
| 5695 | #endif /* CONFIG_WIRELESS */ | 
|---|
| 5696 |  | 
|---|
| 5697 | /** | 
|---|
| 5698 | * skb_partial_csum_set - set up and verify partial csum values for packet | 
|---|
| 5699 | * @skb: the skb to set | 
|---|
| 5700 | * @start: the number of bytes after skb->data to start checksumming. | 
|---|
| 5701 | * @off: the offset from start to place the checksum. | 
|---|
| 5702 | * | 
|---|
| 5703 | * For untrusted partially-checksummed packets, we need to make sure the values | 
|---|
| 5704 | * for skb->csum_start and skb->csum_offset are valid so we don't oops. | 
|---|
| 5705 | * | 
|---|
| 5706 | * This function checks and sets those values and skb->ip_summed: if this | 
|---|
| 5707 | * returns false you should drop the packet. | 
|---|
| 5708 | */ | 
|---|
| 5709 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) | 
|---|
| 5710 | { | 
|---|
| 5711 | u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); | 
|---|
| 5712 | u32 csum_start = skb_headroom(skb) + (u32)start; | 
|---|
| 5713 |  | 
|---|
| 5714 | if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) { | 
|---|
| 5715 | net_warn_ratelimited( "bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", | 
|---|
| 5716 | start, off, skb_headroom(skb), skb_headlen(skb)); | 
|---|
| 5717 | return false; | 
|---|
| 5718 | } | 
|---|
| 5719 | skb->ip_summed = CHECKSUM_PARTIAL; | 
|---|
| 5720 | skb->csum_start = csum_start; | 
|---|
| 5721 | skb->csum_offset = off; | 
|---|
| 5722 | skb->transport_header = csum_start; | 
|---|
| 5723 | return true; | 
|---|
| 5724 | } | 
|---|
| 5725 | EXPORT_SYMBOL_GPL(skb_partial_csum_set); | 
|---|
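|  |  | 
|---|
|  | /* Illustrative sketch, modeled on virtio-net style validation of an untrusted | 
|---|
|  |  * checksum request ("start" and "off" come from the untrusted header): | 
|---|
|  |  * | 
|---|
|  |  *	if (!skb_partial_csum_set(skb, start, off)) | 
|---|
|  |  *		goto drop;	(bogus offsets, the packet must be dropped) | 
|---|
|  |  */ | 
|---|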
| 5726 |  | 
|---|
| 5727 | static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, | 
|---|
| 5728 | unsigned int max) | 
|---|
| 5729 | { | 
|---|
| 5730 | if (skb_headlen(skb) >= len) | 
|---|
| 5731 | return 0; | 
|---|
| 5732 |  | 
|---|
| 5733 | /* If we need to pullup then pullup to the max, so we | 
|---|
| 5734 | * won't need to do it again. | 
|---|
| 5735 | */ | 
|---|
| 5736 | if (max > skb->len) | 
|---|
| 5737 | max = skb->len; | 
|---|
| 5738 |  | 
|---|
| 5739 | if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) | 
|---|
| 5740 | return -ENOMEM; | 
|---|
| 5741 |  | 
|---|
| 5742 | if (skb_headlen(skb) < len) | 
|---|
| 5743 | return -EPROTO; | 
|---|
| 5744 |  | 
|---|
| 5745 | return 0; | 
|---|
| 5746 | } | 
|---|
| 5747 |  | 
|---|
| 5748 | #define MAX_TCP_HDR_LEN (15 * 4) | 
|---|
| 5749 |  | 
|---|
| 5750 | static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, | 
|---|
| 5751 | typeof(IPPROTO_IP) proto, | 
|---|
| 5752 | unsigned int off) | 
|---|
| 5753 | { | 
|---|
| 5754 | int err; | 
|---|
| 5755 |  | 
|---|
| 5756 | switch (proto) { | 
|---|
| 5757 | case IPPROTO_TCP: | 
|---|
| 5758 | err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), | 
|---|
| 5759 | off + MAX_TCP_HDR_LEN); | 
|---|
| 5760 | if (!err && !skb_partial_csum_set(skb, off, | 
|---|
| 5761 | offsetof(struct tcphdr, | 
|---|
| 5762 | check))) | 
|---|
| 5763 | err = -EPROTO; | 
|---|
| 5764 | return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; | 
|---|
| 5765 |  | 
|---|
| 5766 | case IPPROTO_UDP: | 
|---|
| 5767 | err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), | 
|---|
| 5768 | off + sizeof(struct udphdr)); | 
|---|
| 5769 | if (!err && !skb_partial_csum_set(skb, off, | 
|---|
| 5770 | offsetof(struct udphdr, | 
|---|
| 5771 | check))) | 
|---|
| 5772 | err = -EPROTO; | 
|---|
| 5773 | return err ? ERR_PTR(err) : &udp_hdr(skb)->check; | 
|---|
| 5774 | } | 
|---|
| 5775 |  | 
|---|
| 5776 | return ERR_PTR(-EPROTO); | 
|---|
| 5777 | } | 
|---|
| 5778 |  | 
|---|
| 5779 | /* This value should be large enough to cover a tagged ethernet header plus | 
|---|
| 5780 | * maximally sized IP and TCP or UDP headers. | 
|---|
| 5781 | */ | 
|---|
| 5782 | #define MAX_IP_HDR_LEN 128 | 
|---|
| 5783 |  | 
|---|
| 5784 | static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) | 
|---|
| 5785 | { | 
|---|
| 5786 | unsigned int off; | 
|---|
| 5787 | bool fragment; | 
|---|
| 5788 | __sum16 *csum; | 
|---|
| 5789 | int err; | 
|---|
| 5790 |  | 
|---|
| 5791 | fragment = false; | 
|---|
| 5792 |  | 
|---|
| 5793 | err = skb_maybe_pull_tail(skb, | 
|---|
| 5794 | sizeof(struct iphdr), | 
|---|
| 5795 | MAX_IP_HDR_LEN); | 
|---|
| 5796 | if (err < 0) | 
|---|
| 5797 | goto out; | 
|---|
| 5798 |  | 
|---|
| 5799 | if (ip_is_fragment(ip_hdr(skb))) | 
|---|
| 5800 | fragment = true; | 
|---|
| 5801 |  | 
|---|
| 5802 | off = ip_hdrlen(skb); | 
|---|
| 5803 |  | 
|---|
| 5804 | err = -EPROTO; | 
|---|
| 5805 |  | 
|---|
| 5806 | if (fragment) | 
|---|
| 5807 | goto out; | 
|---|
| 5808 |  | 
|---|
| 5809 | csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); | 
|---|
| 5810 | if (IS_ERR(csum)) | 
|---|
| 5811 | return PTR_ERR(csum); | 
|---|
| 5812 |  | 
|---|
| 5813 | if (recalculate) | 
|---|
| 5814 | *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, | 
|---|
| 5815 | ip_hdr(skb)->daddr, | 
|---|
| 5816 | skb->len - off, | 
|---|
| 5817 | ip_hdr(skb)->protocol, 0); | 
|---|
| 5818 | err = 0; | 
|---|
| 5819 |  | 
|---|
| 5820 | out: | 
|---|
| 5821 | return err; | 
|---|
| 5822 | } | 
|---|
| 5823 |  | 
|---|
| 5824 | /* This value should be large enough to cover a tagged ethernet header plus | 
|---|
| 5825 | * an IPv6 header, all options, and a maximal TCP or UDP header. | 
|---|
| 5826 | */ | 
|---|
| 5827 | #define MAX_IPV6_HDR_LEN 256 | 
|---|
| 5828 |  | 
|---|
| 5829 | #define OPT_HDR(type, skb, off) \ | 
|---|
| 5830 | (type *)(skb_network_header(skb) + (off)) | 
|---|
| 5831 |  | 
|---|
| 5832 | static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) | 
|---|
| 5833 | { | 
|---|
| 5834 | int err; | 
|---|
| 5835 | u8 nexthdr; | 
|---|
| 5836 | unsigned int off; | 
|---|
| 5837 | unsigned int len; | 
|---|
| 5838 | bool fragment; | 
|---|
| 5839 | bool done; | 
|---|
| 5840 | __sum16 *csum; | 
|---|
| 5841 |  | 
|---|
| 5842 | fragment = false; | 
|---|
| 5843 | done = false; | 
|---|
| 5844 |  | 
|---|
| 5845 | off = sizeof(struct ipv6hdr); | 
|---|
| 5846 |  | 
|---|
| 5847 | err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); | 
|---|
| 5848 | if (err < 0) | 
|---|
| 5849 | goto out; | 
|---|
| 5850 |  | 
|---|
| 5851 | nexthdr = ipv6_hdr(skb)->nexthdr; | 
|---|
| 5852 |  | 
|---|
| 5853 | len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); | 
|---|
| 5854 | while (off <= len && !done) { | 
|---|
| 5855 | switch (nexthdr) { | 
|---|
| 5856 | case IPPROTO_DSTOPTS: | 
|---|
| 5857 | case IPPROTO_HOPOPTS: | 
|---|
| 5858 | case IPPROTO_ROUTING: { | 
|---|
| 5859 | struct ipv6_opt_hdr *hp; | 
|---|
| 5860 |  | 
|---|
| 5861 | err = skb_maybe_pull_tail(skb, | 
|---|
| 5862 | off + | 
|---|
| 5863 | sizeof(struct ipv6_opt_hdr), | 
|---|
| 5864 | MAX_IPV6_HDR_LEN); | 
|---|
| 5865 | if (err < 0) | 
|---|
| 5866 | goto out; | 
|---|
| 5867 |  | 
|---|
| 5868 | hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); | 
|---|
| 5869 | nexthdr = hp->nexthdr; | 
|---|
| 5870 | off += ipv6_optlen(hp); | 
|---|
| 5871 | break; | 
|---|
| 5872 | } | 
|---|
| 5873 | case IPPROTO_AH: { | 
|---|
| 5874 | struct ip_auth_hdr *hp; | 
|---|
| 5875 |  | 
|---|
| 5876 | err = skb_maybe_pull_tail(skb, | 
|---|
| 5877 | off + | 
|---|
| 5878 | sizeof(struct ip_auth_hdr), | 
|---|
| 5879 | MAX_IPV6_HDR_LEN); | 
|---|
| 5880 | if (err < 0) | 
|---|
| 5881 | goto out; | 
|---|
| 5882 |  | 
|---|
| 5883 | hp = OPT_HDR(struct ip_auth_hdr, skb, off); | 
|---|
| 5884 | nexthdr = hp->nexthdr; | 
|---|
| 5885 | off += ipv6_authlen(hp); | 
|---|
| 5886 | break; | 
|---|
| 5887 | } | 
|---|
| 5888 | case IPPROTO_FRAGMENT: { | 
|---|
| 5889 | struct frag_hdr *hp; | 
|---|
| 5890 |  | 
|---|
| 5891 | err = skb_maybe_pull_tail(skb, | 
|---|
| 5892 | off + | 
|---|
| 5893 | sizeof(struct frag_hdr), | 
|---|
| 5894 | MAX_IPV6_HDR_LEN); | 
|---|
| 5895 | if (err < 0) | 
|---|
| 5896 | goto out; | 
|---|
| 5897 |  | 
|---|
| 5898 | hp = OPT_HDR(struct frag_hdr, skb, off); | 
|---|
| 5899 |  | 
|---|
| 5900 | if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) | 
|---|
| 5901 | fragment = true; | 
|---|
| 5902 |  | 
|---|
| 5903 | nexthdr = hp->nexthdr; | 
|---|
| 5904 | off += sizeof(struct frag_hdr); | 
|---|
| 5905 | break; | 
|---|
| 5906 | } | 
|---|
| 5907 | default: | 
|---|
| 5908 | done = true; | 
|---|
| 5909 | break; | 
|---|
| 5910 | } | 
|---|
| 5911 | } | 
|---|
| 5912 |  | 
|---|
| 5913 | err = -EPROTO; | 
|---|
| 5914 |  | 
|---|
| 5915 | if (!done || fragment) | 
|---|
| 5916 | goto out; | 
|---|
| 5917 |  | 
|---|
| 5918 | csum = skb_checksum_setup_ip(skb, nexthdr, off); | 
|---|
| 5919 | if (IS_ERR(csum)) | 
|---|
| 5920 | return PTR_ERR(csum); | 
|---|
| 5921 |  | 
|---|
| 5922 | if (recalculate) | 
|---|
| 5923 | *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 
|---|
| 5924 | &ipv6_hdr(skb)->daddr, | 
|---|
| 5925 | skb->len - off, nexthdr, 0); | 
|---|
| 5926 | err = 0; | 
|---|
| 5927 |  | 
|---|
| 5928 | out: | 
|---|
| 5929 | return err; | 
|---|
| 5930 | } | 
|---|
| 5931 |  | 
|---|
| 5932 | /** | 
|---|
| 5933 | * skb_checksum_setup - set up partial checksum offset | 
|---|
| 5934 | * @skb: the skb to set up | 
|---|
| 5935 | * @recalculate: if true the pseudo-header checksum will be recalculated | 
|---|
| 5936 | */ | 
|---|
| 5937 | int skb_checksum_setup(struct sk_buff *skb, bool recalculate) | 
|---|
| 5938 | { | 
|---|
| 5939 | int err; | 
|---|
| 5940 |  | 
|---|
| 5941 | switch (skb->protocol) { | 
|---|
| 5942 | case htons(ETH_P_IP): | 
|---|
| 5943 | err = skb_checksum_setup_ipv4(skb, recalculate); | 
|---|
| 5944 | break; | 
|---|
| 5945 |  | 
|---|
| 5946 | case htons(ETH_P_IPV6): | 
|---|
| 5947 | err = skb_checksum_setup_ipv6(skb, recalculate); | 
|---|
| 5948 | break; | 
|---|
| 5949 |  | 
|---|
| 5950 | default: | 
|---|
| 5951 | err = -EPROTO; | 
|---|
| 5952 | break; | 
|---|
| 5953 | } | 
|---|
| 5954 |  | 
|---|
| 5955 | return err; | 
|---|
| 5956 | } | 
|---|
| 5957 | EXPORT_SYMBOL(skb_checksum_setup); | 
|---|
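|  |  | 
|---|
|  | /* Illustrative sketch, modeled on Xen netback/netfront style users: fix up | 
|---|
|  |  * the partial checksum of a guest-supplied packet before handing it to the | 
|---|
|  |  * stack: | 
|---|
|  |  * | 
|---|
|  |  *	if (skb_checksum_setup(skb, true)) { | 
|---|
|  |  *		kfree_skb(skb); | 
|---|
|  |  *		return; | 
|---|
|  |  *	} | 
|---|
|  |  */ | 
|---|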
| 5958 |  | 
|---|
| 5959 | /** | 
|---|
| 5960 | * skb_checksum_maybe_trim - maybe trims the given skb | 
|---|
| 5961 | * @skb: the skb to check | 
|---|
| 5962 | * @transport_len: the data length beyond the network header | 
|---|
| 5963 | * | 
|---|
| 5964 | * Checks whether the given skb has data beyond the given transport length. | 
|---|
| 5965 | * If so, returns a cloned skb trimmed to this transport length. | 
|---|
| 5966 | * Otherwise returns the provided skb. Returns NULL in error cases | 
|---|
| 5967 | * (e.g. transport_len exceeds skb length or out-of-memory). | 
|---|
| 5968 | * | 
|---|
| 5969 | * Caller needs to set the skb transport header and free any returned skb if it | 
|---|
| 5970 | * differs from the provided skb. | 
|---|
| 5971 | */ | 
|---|
| 5972 | static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, | 
|---|
| 5973 | unsigned int transport_len) | 
|---|
| 5974 | { | 
|---|
| 5975 | struct sk_buff *skb_chk; | 
|---|
| 5976 | unsigned int len = skb_transport_offset(skb) + transport_len; | 
|---|
| 5977 | int ret; | 
|---|
| 5978 |  | 
|---|
| 5979 | if (skb->len < len) | 
|---|
| 5980 | return NULL; | 
|---|
| 5981 | else if (skb->len == len) | 
|---|
| 5982 | return skb; | 
|---|
| 5983 |  | 
|---|
| 5984 | skb_chk = skb_clone(skb, GFP_ATOMIC); | 
|---|
| 5985 | if (!skb_chk) | 
|---|
| 5986 | return NULL; | 
|---|
| 5987 |  | 
|---|
| 5988 | ret = pskb_trim_rcsum(skb_chk, len); | 
|---|
| 5989 | if (ret) { | 
|---|
| 5990 | kfree_skb(skb_chk); | 
|---|
| 5991 | return NULL; | 
|---|
| 5992 | } | 
|---|
| 5993 |  | 
|---|
| 5994 | return skb_chk; | 
|---|
| 5995 | } | 
|---|
| 5996 |  | 
|---|
| 5997 | /** | 
|---|
| 5998 | * skb_checksum_trimmed - validate checksum of an skb | 
|---|
| 5999 | * @skb: the skb to check | 
|---|
| 6000 | * @transport_len: the data length beyond the network header | 
|---|
| 6001 | * @skb_chkf: checksum function to use | 
|---|
| 6002 | * | 
|---|
| 6003 | * Applies the given checksum function skb_chkf to the provided skb. | 
|---|
| 6004 | * Returns a checked and maybe trimmed skb. Returns NULL on error. | 
|---|
| 6005 | * | 
|---|
| 6006 | * If the skb has data beyond the given transport length, then a | 
|---|
| 6007 | * trimmed & cloned skb is checked and returned. | 
|---|
| 6008 | * | 
|---|
| 6009 | * Caller needs to set the skb transport header and free any returned skb if it | 
|---|
| 6010 | * differs from the provided skb. | 
|---|
| 6011 | */ | 
|---|
| 6012 | struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, | 
|---|
| 6013 | unsigned int transport_len, | 
|---|
| 6014 | __sum16(*skb_chkf)(struct sk_buff *skb)) | 
|---|
| 6015 | { | 
|---|
| 6016 | struct sk_buff *skb_chk; | 
|---|
| 6017 | unsigned int offset = skb_transport_offset(skb); | 
|---|
| 6018 | __sum16 ret; | 
|---|
| 6019 |  | 
|---|
| 6020 | skb_chk = skb_checksum_maybe_trim(skb, transport_len); | 
|---|
| 6021 | if (!skb_chk) | 
|---|
| 6022 | goto err; | 
|---|
| 6023 |  | 
|---|
| 6024 | if (!pskb_may_pull(skb_chk, offset)) | 
|---|
| 6025 | goto err; | 
|---|
| 6026 |  | 
|---|
| 6027 | skb_pull_rcsum(skb_chk, offset); | 
|---|
| 6028 | ret = skb_chkf(skb_chk); | 
|---|
| 6029 | skb_push_rcsum(skb_chk, offset); | 
|---|
| 6030 |  | 
|---|
| 6031 | if (ret) | 
|---|
| 6032 | goto err; | 
|---|
| 6033 |  | 
|---|
| 6034 | return skb_chk; | 
|---|
| 6035 |  | 
|---|
| 6036 | err: | 
|---|
| 6037 | if (skb_chk && skb_chk != skb) | 
|---|
| 6038 | kfree_skb(skb_chk); | 
|---|
| 6039 |  | 
|---|
| 6040 | return NULL; | 
|---|
| 6041 |  | 
|---|
| 6042 | } | 
|---|
| 6043 | EXPORT_SYMBOL(skb_checksum_trimmed); | 
|---|
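|  |  | 
|---|
|  | /* Illustrative sketch, modeled on IGMP/MLD checksum validation ("my_chkf" is | 
|---|
|  |  * a hypothetical __sum16 (*)(struct sk_buff *) checksum routine, "off" the | 
|---|
|  |  * transport header offset): | 
|---|
|  |  * | 
|---|
|  |  *	skb_set_transport_header(skb, off); | 
|---|
|  |  *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_chkf); | 
|---|
|  |  *	if (!skb_chk) | 
|---|
|  |  *		return -EINVAL; | 
|---|
|  |  *	... use skb_chk ... | 
|---|
|  |  *	if (skb_chk != skb) | 
|---|
|  |  *		kfree_skb(skb_chk); | 
|---|
|  |  */ | 
|---|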
| 6044 |  | 
|---|
| 6045 | void __skb_warn_lro_forwarding(const struct sk_buff *skb) | 
|---|
| 6046 | { | 
|---|
| 6047 | net_warn_ratelimited( "%s: received packets cannot be forwarded while LRO is enabled\n", | 
|---|
| 6048 | skb->dev->name); | 
|---|
| 6049 | } | 
|---|
| 6050 | EXPORT_SYMBOL(__skb_warn_lro_forwarding); | 
|---|
| 6051 |  | 
|---|
| 6052 | void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) | 
|---|
| 6053 | { | 
|---|
| 6054 | if (head_stolen) { | 
|---|
| 6055 | skb_release_head_state(skb); | 
|---|
| 6056 | kmem_cache_free(net_hotdata.skbuff_cache, skb); | 
|---|
| 6057 | } else { | 
|---|
| 6058 | __kfree_skb(skb); | 
|---|
| 6059 | } | 
|---|
| 6060 | } | 
|---|
| 6061 | EXPORT_SYMBOL(kfree_skb_partial); | 
|---|
| 6062 |  | 
|---|
| 6063 | /** | 
|---|
| 6064 | * skb_try_coalesce - try to merge skb to prior one | 
|---|
| 6065 | * @to: prior buffer | 
|---|
| 6066 | * @from: buffer to add | 
|---|
| 6067 | * @fragstolen: pointer to boolean | 
|---|
| 6068 | * @delta_truesize: how much more was allocated than was requested | 
|---|
| 6069 | */ | 
|---|
| 6070 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, | 
|---|
| 6071 | bool *fragstolen, int *delta_truesize) | 
|---|
| 6072 | { | 
|---|
| 6073 | struct skb_shared_info *to_shinfo, *from_shinfo; | 
|---|
| 6074 | int i, delta, len = from->len; | 
|---|
| 6075 |  | 
|---|
| 6076 | *fragstolen = false; | 
|---|
| 6077 |  | 
|---|
| 6078 | if (skb_cloned(to)) | 
|---|
| 6079 | return false; | 
|---|
| 6080 |  | 
|---|
| 6081 | /* In general, avoid mixing page_pool and non-page_pool allocated | 
|---|
| 6082 | * pages within the same SKB. In theory we could take full | 
|---|
| 6083 | * references if @from is cloned and !@to->pp_recycle but its | 
|---|
| 6084 | * tricky (due to potential race with the clone disappearing) and | 
|---|
| 6085 | * rare, so not worth dealing with. | 
|---|
| 6086 | */ | 
|---|
| 6087 | if (to->pp_recycle != from->pp_recycle) | 
|---|
| 6088 | return false; | 
|---|
| 6089 |  | 
|---|
| 6090 | if (skb_frags_readable(from) != skb_frags_readable(to)) | 
|---|
| 6091 | return false; | 
|---|
| 6092 |  | 
|---|
| 6093 | if (len <= skb_tailroom(to) && skb_frags_readable(from)) { | 
|---|
| 6094 | if (len) | 
|---|
| 6095 | BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); | 
|---|
| 6096 | *delta_truesize = 0; | 
|---|
| 6097 | return true; | 
|---|
| 6098 | } | 
|---|
| 6099 |  | 
|---|
| 6100 | to_shinfo = skb_shinfo(to); | 
|---|
| 6101 | from_shinfo = skb_shinfo(from); | 
|---|
| 6102 | if (to_shinfo->frag_list || from_shinfo->frag_list) | 
|---|
| 6103 | return false; | 
|---|
| 6104 | if (skb_zcopy(to) || skb_zcopy(from)) | 
|---|
| 6105 | return false; | 
|---|
| 6106 |  | 
|---|
| 6107 | if (skb_headlen(from) != 0) { | 
|---|
| 6108 | struct page *page; | 
|---|
| 6109 | unsigned int offset; | 
|---|
| 6110 |  | 
|---|
| 6111 | if (to_shinfo->nr_frags + | 
|---|
| 6112 | from_shinfo->nr_frags >= MAX_SKB_FRAGS) | 
|---|
| 6113 | return false; | 
|---|
| 6114 |  | 
|---|
| 6115 | if (skb_head_is_locked(from)) | 
|---|
| 6116 | return false; | 
|---|
| 6117 |  | 
|---|
| 6118 | delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); | 
|---|
| 6119 |  | 
|---|
| 6120 | page = virt_to_head_page(from->head); | 
|---|
| 6121 | offset = from->data - (unsigned char *)page_address(page); | 
|---|
| 6122 |  | 
|---|
| 6123 | skb_fill_page_desc(to, to_shinfo->nr_frags, | 
|---|
| 6124 | page, offset, skb_headlen(from)); | 
|---|
| 6125 | *fragstolen = true; | 
|---|
| 6126 | } else { | 
|---|
| 6127 | if (to_shinfo->nr_frags + | 
|---|
| 6128 | from_shinfo->nr_frags > MAX_SKB_FRAGS) | 
|---|
| 6129 | return false; | 
|---|
| 6130 |  | 
|---|
| 6131 | delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); | 
|---|
| 6132 | } | 
|---|
| 6133 |  | 
|---|
| 6134 | WARN_ON_ONCE(delta < len); | 
|---|
| 6135 |  | 
|---|
| 6136 | memcpy(to_shinfo->frags + to_shinfo->nr_frags, | 
|---|
| 6137 | from_shinfo->frags, | 
|---|
| 6138 | from_shinfo->nr_frags * sizeof(skb_frag_t)); | 
|---|
| 6139 | to_shinfo->nr_frags += from_shinfo->nr_frags; | 
|---|
| 6140 |  | 
|---|
| 6141 | if (!skb_cloned(from)) | 
|---|
| 6142 | from_shinfo->nr_frags = 0; | 
|---|
| 6143 |  | 
|---|
| 6144 | /* if the skb is not cloned this does nothing | 
|---|
| 6145 | * since we set nr_frags to 0. | 
|---|
| 6146 | */ | 
|---|
| 6147 | if (skb_pp_frag_ref(from)) { | 
|---|
| 6148 | for (i = 0; i < from_shinfo->nr_frags; i++) | 
|---|
| 6149 | __skb_frag_ref(&from_shinfo->frags[i]); | 
|---|
| 6150 | } | 
|---|
| 6151 |  | 
|---|
| 6152 | to->truesize += delta; | 
|---|
| 6153 | to->len += len; | 
|---|
| 6154 | to->data_len += len; | 
|---|
| 6155 |  | 
|---|
| 6156 | *delta_truesize = delta; | 
|---|
| 6157 | return true; | 
|---|
| 6158 | } | 
|---|
| 6159 | EXPORT_SYMBOL(skb_try_coalesce); | 
|---|
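|  |  | 
|---|
|  | /* Illustrative sketch, modeled on receive-queue users such as TCP: coalesce | 
|---|
|  |  * into the queue tail and release what remains of "skb": | 
|---|
|  |  * | 
|---|
|  |  *	bool fragstolen; | 
|---|
|  |  *	int delta; | 
|---|
|  |  * | 
|---|
|  |  *	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) { | 
|---|
|  |  *		(account "delta" to the queue's truesize) | 
|---|
|  |  *		kfree_skb_partial(skb, fragstolen); | 
|---|
|  |  *	} | 
|---|
|  |  */ | 
|---|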
| 6160 |  | 
|---|
| 6161 | /** | 
|---|
| 6162 | * skb_scrub_packet - scrub an skb | 
|---|
| 6163 | * | 
|---|
| 6164 | * @skb: buffer to clean | 
|---|
| 6165 | * @xnet: packet is crossing netns | 
|---|
| 6166 | * | 
|---|
| 6167 | * skb_scrub_packet can be used after encapsulating or decapsulating a packet | 
|---|
| 6168 | * into/from a tunnel. Some information has to be cleared during these | 
|---|
| 6169 | * operations. | 
|---|
| 6170 | * skb_scrub_packet can also be used to clean an skb before injecting it into | 
|---|
| 6171 | * another namespace (@xnet == true). We have to clear all information in the | 
|---|
| 6172 | * skb that could impact namespace isolation. | 
|---|
| 6173 | */ | 
|---|
| 6174 | void skb_scrub_packet(struct sk_buff *skb, bool xnet) | 
|---|
| 6175 | { | 
|---|
| 6176 | skb->pkt_type = PACKET_HOST; | 
|---|
| 6177 | skb->skb_iif = 0; | 
|---|
| 6178 | skb->ignore_df = 0; | 
|---|
| 6179 | skb_dst_drop(skb); | 
|---|
| 6180 | skb_ext_reset(skb); | 
|---|
| 6181 | nf_reset_ct(skb); | 
|---|
| 6182 | nf_reset_trace(skb); | 
|---|
| 6183 |  | 
|---|
| 6184 | #ifdef CONFIG_NET_SWITCHDEV | 
|---|
| 6185 | skb->offload_fwd_mark = 0; | 
|---|
| 6186 | skb->offload_l3_fwd_mark = 0; | 
|---|
| 6187 | #endif | 
|---|
| 6188 | ipvs_reset(skb); | 
|---|
| 6189 |  | 
|---|
| 6190 | if (!xnet) | 
|---|
| 6191 | return; | 
|---|
| 6192 |  | 
|---|
| 6193 | skb->mark = 0; | 
|---|
| 6194 | skb_clear_tstamp(skb); | 
|---|
| 6195 | } | 
|---|
| 6196 | EXPORT_SYMBOL_GPL(skb_scrub_packet); | 
|---|
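|  |  | 
|---|
|  | /* Illustrative sketch: a tunnel receive path scrubbing before re-injecting | 
|---|
|  |  * the packet into a device ("dev") that may live in another netns: | 
|---|
|  |  * | 
|---|
|  |  *	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev))); | 
|---|
|  |  */ | 
|---|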
| 6197 |  | 
|---|
| 6198 | static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) | 
|---|
| 6199 | { | 
|---|
| 6200 | int mac_len, meta_len; | 
|---|
| 6201 | void *meta; | 
|---|
| 6202 |  | 
|---|
| 6203 | if (skb_cow(skb, skb_headroom(skb)) < 0) { | 
|---|
| 6204 | kfree_skb(skb); | 
|---|
| 6205 | return NULL; | 
|---|
| 6206 | } | 
|---|
| 6207 |  | 
|---|
| 6208 | mac_len = skb->data - skb_mac_header(skb); | 
|---|
| 6209 | if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { | 
|---|
| 6210 | memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), | 
|---|
| 6211 | mac_len - VLAN_HLEN - ETH_TLEN); | 
|---|
| 6212 | } | 
|---|
| 6213 |  | 
|---|
| 6214 | meta_len = skb_metadata_len(skb); | 
|---|
| 6215 | if (meta_len) { | 
|---|
| 6216 | meta = skb_metadata_end(skb) - meta_len; | 
|---|
| 6217 | memmove(meta + VLAN_HLEN, meta, meta_len); | 
|---|
| 6218 | } | 
|---|
| 6219 |  | 
|---|
| 6220 | skb->mac_header += VLAN_HLEN; | 
|---|
| 6221 | return skb; | 
|---|
| 6222 | } | 
|---|
| 6223 |  | 
|---|
| 6224 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb) | 
|---|
| 6225 | { | 
|---|
| 6226 | struct vlan_hdr *vhdr; | 
|---|
| 6227 | u16 vlan_tci; | 
|---|
| 6228 |  | 
|---|
| 6229 | if (unlikely(skb_vlan_tag_present(skb))) { | 
|---|
| 6230 | /* vlan_tci is already set-up so leave this for another time */ | 
|---|
| 6231 | return skb; | 
|---|
| 6232 | } | 
|---|
| 6233 |  | 
|---|
| 6234 | skb = skb_share_check(skb, GFP_ATOMIC); | 
|---|
| 6235 | if (unlikely(!skb)) | 
|---|
| 6236 | goto err_free; | 
|---|
| 6237 | /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ | 
|---|
| 6238 | if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) | 
|---|
| 6239 | goto err_free; | 
|---|
| 6240 |  | 
|---|
| 6241 | vhdr = (struct vlan_hdr *)skb->data; | 
|---|
| 6242 | vlan_tci = ntohs(vhdr->h_vlan_TCI); | 
|---|
| 6243 | __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); | 
|---|
| 6244 |  | 
|---|
| 6245 | skb_pull_rcsum(skb, VLAN_HLEN); | 
|---|
| 6246 | vlan_set_encap_proto(skb, vhdr); | 
|---|
| 6247 |  | 
|---|
| 6248 | skb = skb_reorder_vlan_header(skb); | 
|---|
| 6249 | if (unlikely(!skb)) | 
|---|
| 6250 | goto err_free; | 
|---|
| 6251 |  | 
|---|
| 6252 | skb_reset_network_header(skb); | 
|---|
| 6253 | if (!skb_transport_header_was_set(skb)) | 
|---|
| 6254 | skb_reset_transport_header(skb); | 
|---|
| 6255 | skb_reset_mac_len(skb); | 
|---|
| 6256 |  | 
|---|
| 6257 | return skb; | 
|---|
| 6258 |  | 
|---|
| 6259 | err_free: | 
|---|
| 6260 | kfree_skb(skb); | 
|---|
| 6261 | return NULL; | 
|---|
| 6262 | } | 
|---|
| 6263 | EXPORT_SYMBOL(skb_vlan_untag); | 
|---|
| 6264 |  | 
|---|
| 6265 | int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) | 
|---|
| 6266 | { | 
|---|
| 6267 | if (!pskb_may_pull(skb, write_len)) | 
|---|
| 6268 | return -ENOMEM; | 
|---|
| 6269 |  | 
|---|
| 6270 | if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) | 
|---|
| 6271 | return 0; | 
|---|
| 6272 |  | 
|---|
| 6273 | return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 
|---|
| 6274 | } | 
|---|
| 6275 | EXPORT_SYMBOL(skb_ensure_writable); | 
|---|
| 6276 |  | 
|---|
| 6277 | int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev) | 
|---|
| 6278 | { | 
|---|
| 6279 | int needed_headroom = dev->needed_headroom; | 
|---|
| 6280 | int needed_tailroom = dev->needed_tailroom; | 
|---|
| 6281 |  | 
|---|
| 6282 | /* For tail taggers, we need to pad short frames ourselves, to ensure | 
|---|
| 6283 | * that the tail tag does not fail at its role of being at the end of | 
|---|
| 6284 | * the packet, once the conduit interface pads the frame. Account for | 
|---|
| 6285 | * that pad length here, and pad later. | 
|---|
| 6286 | */ | 
|---|
| 6287 | if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) | 
|---|
| 6288 | needed_tailroom += ETH_ZLEN - skb->len; | 
|---|
| 6289 | /* skb_headroom() returns unsigned int... */ | 
|---|
| 6290 | needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); | 
|---|
| 6291 | needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); | 
|---|
| 6292 |  | 
|---|
| 6293 | if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) | 
|---|
| 6294 | /* No reallocation needed, yay! */ | 
|---|
| 6295 | return 0; | 
|---|
| 6296 |  | 
|---|
| 6297 | return pskb_expand_head(skb, needed_headroom, needed_tailroom, | 
|---|
| 6298 | GFP_ATOMIC); | 
|---|
| 6299 | } | 
|---|
| 6300 | EXPORT_SYMBOL(skb_ensure_writable_head_tail); | 
|---|
| 6301 |  | 
|---|
| 6302 | /* remove VLAN header from packet and update csum accordingly. | 
|---|
| 6303 | * expects a non skb_vlan_tag_present skb with a vlan tag payload | 
|---|
| 6304 | */ | 
|---|
| 6305 | int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) | 
|---|
| 6306 | { | 
|---|
| 6307 | int offset = skb->data - skb_mac_header(skb); | 
|---|
| 6308 | int err; | 
|---|
| 6309 |  | 
|---|
| 6310 | if (WARN_ONCE(offset, | 
|---|
| 6311 | "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", | 
|---|
| 6312 | offset)) { | 
|---|
| 6313 | return -EINVAL; | 
|---|
| 6314 | } | 
|---|
| 6315 |  | 
|---|
| 6316 | err = skb_ensure_writable(skb, VLAN_ETH_HLEN); | 
|---|
| 6317 | if (unlikely(err)) | 
|---|
| 6318 | return err; | 
|---|
| 6319 |  | 
|---|
| 6320 | skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); | 
|---|
| 6321 |  | 
|---|
| 6322 | vlan_remove_tag(skb, vlan_tci); | 
|---|
| 6323 |  | 
|---|
| 6324 | skb->mac_header += VLAN_HLEN; | 
|---|
| 6325 |  | 
|---|
| 6326 | if (skb_network_offset(skb) < ETH_HLEN) | 
|---|
| 6327 | skb_set_network_header(skb, ETH_HLEN); | 
|---|
| 6328 |  | 
|---|
| 6329 | skb_reset_mac_len(skb); | 
|---|
| 6330 |  | 
|---|
| 6331 | return err; | 
|---|
| 6332 | } | 
|---|
| 6333 | EXPORT_SYMBOL(__skb_vlan_pop); | 
|---|
| 6334 |  | 
|---|
| 6335 | /* Pop a vlan tag either from hwaccel or from payload. | 
|---|
| 6336 | * Expects skb->data at mac header. | 
|---|
| 6337 | */ | 
|---|
| 6338 | int skb_vlan_pop(struct sk_buff *skb) | 
|---|
| 6339 | { | 
|---|
| 6340 | u16 vlan_tci; | 
|---|
| 6341 | __be16 vlan_proto; | 
|---|
| 6342 | int err; | 
|---|
| 6343 |  | 
|---|
| 6344 | if (likely(skb_vlan_tag_present(skb))) { | 
|---|
| 6345 | __vlan_hwaccel_clear_tag(skb); | 
|---|
| 6346 | } else { | 
|---|
| 6347 | if (unlikely(!eth_type_vlan(skb->protocol))) | 
|---|
| 6348 | return 0; | 
|---|
| 6349 |  | 
|---|
| 6350 | err = __skb_vlan_pop(skb, &vlan_tci); | 
|---|
| 6351 | if (err) | 
|---|
| 6352 | return err; | 
|---|
| 6353 | } | 
|---|
| 6354 | /* move next vlan tag to hw accel tag */ | 
|---|
| 6355 | if (likely(!eth_type_vlan(skb->protocol))) | 
|---|
| 6356 | return 0; | 
|---|
| 6357 |  | 
|---|
| 6358 | vlan_proto = skb->protocol; | 
|---|
| 6359 | err = __skb_vlan_pop(skb, &vlan_tci); | 
|---|
| 6360 | if (unlikely(err)) | 
|---|
| 6361 | return err; | 
|---|
| 6362 |  | 
|---|
| 6363 | __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); | 
|---|
| 6364 | return 0; | 
|---|
| 6365 | } | 
|---|
| 6366 | EXPORT_SYMBOL(skb_vlan_pop); | 
|---|
| 6367 |  | 
|---|
| 6368 | /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). | 
|---|
| 6369 | * Expects skb->data at mac header. | 
|---|
| 6370 | */ | 
|---|
| 6371 | int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) | 
|---|
| 6372 | { | 
|---|
| 6373 | if (skb_vlan_tag_present(skb)) { | 
|---|
| 6374 | int offset = skb->data - skb_mac_header(skb); | 
|---|
| 6375 | int err; | 
|---|
| 6376 |  | 
|---|
| 6377 | if (WARN_ONCE(offset, | 
|---|
| 6378 | "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", | 
|---|
| 6379 | offset)) { | 
|---|
| 6380 | return -EINVAL; | 
|---|
| 6381 | } | 
|---|
| 6382 |  | 
|---|
| 6383 | err = __vlan_insert_tag(skb, skb->vlan_proto, | 
|---|
| 6384 | skb_vlan_tag_get(skb)); | 
|---|
| 6385 | if (err) | 
|---|
| 6386 | return err; | 
|---|
| 6387 |  | 
|---|
| 6388 | skb->protocol = skb->vlan_proto; | 
|---|
| 6389 | skb->network_header -= VLAN_HLEN; | 
|---|
| 6390 |  | 
|---|
| 6391 | skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); | 
|---|
| 6392 | } | 
|---|
| 6393 | __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); | 
|---|
| 6394 | return 0; | 
|---|
| 6395 | } | 
|---|
| 6396 | EXPORT_SYMBOL(skb_vlan_push); | 
|---|
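|  |  | 
|---|
|  | /* Illustrative sketch, modeled on tc act_vlan style rewriting: pop the | 
|---|
|  |  * current tag, then push a replacement ("new_vid" is hypothetical): | 
|---|
|  |  * | 
|---|
|  |  *	err = skb_vlan_pop(skb); | 
|---|
|  |  *	if (!err) | 
|---|
|  |  *		err = skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid); | 
|---|
|  |  */ | 
|---|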
| 6397 |  | 
|---|
| 6398 | /** | 
|---|
| 6399 | * skb_eth_pop() - Drop the Ethernet header at the head of a packet | 
|---|
| 6400 | * | 
|---|
| 6401 | * @skb: Socket buffer to modify | 
|---|
| 6402 | * | 
|---|
| 6403 | * Drop the Ethernet header of @skb. | 
|---|
| 6404 | * | 
|---|
| 6405 | * Expects that skb->data points to the mac header and that no VLAN tags are | 
|---|
| 6406 | * present. | 
|---|
| 6407 | * | 
|---|
| 6408 | * Returns 0 on success, -errno otherwise. | 
|---|
| 6409 | */ | 
|---|
| 6410 | int skb_eth_pop(struct sk_buff *skb) | 
|---|
| 6411 | { | 
|---|
| 6412 | if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || | 
|---|
| 6413 | skb_network_offset(skb) < ETH_HLEN) | 
|---|
| 6414 | return -EPROTO; | 
|---|
| 6415 |  | 
|---|
| 6416 | skb_pull_rcsum(skb, ETH_HLEN); | 
|---|
| 6417 | skb_reset_mac_header(skb); | 
|---|
| 6418 | skb_reset_mac_len(skb); | 
|---|
| 6419 |  | 
|---|
| 6420 | return 0; | 
|---|
| 6421 | } | 
|---|
| 6422 | EXPORT_SYMBOL(skb_eth_pop); | 
|---|
| 6423 |  | 
|---|
| 6424 | /** | 
|---|
| 6425 | * skb_eth_push() - Add a new Ethernet header at the head of a packet | 
|---|
| 6426 | * | 
|---|
| 6427 | * @skb: Socket buffer to modify | 
|---|
| 6428 | * @dst: Destination MAC address of the new header | 
|---|
| 6429 | * @src: Source MAC address of the new header | 
|---|
| 6430 | * | 
|---|
| 6431 | * Prepend @skb with a new Ethernet header. | 
|---|
| 6432 | * | 
|---|
| 6433 | * Expects that skb->data points to the mac header, which must be empty. | 
|---|
| 6434 | * | 
|---|
| 6435 | * Returns 0 on success, -errno otherwise. | 
|---|
| 6436 | */ | 
|---|
| 6437 | int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, | 
|---|
| 6438 | const unsigned char *src) | 
|---|
| 6439 | { | 
|---|
| 6440 | struct ethhdr *eth; | 
|---|
| 6441 | int err; | 
|---|
| 6442 |  | 
|---|
| 6443 | if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) | 
|---|
| 6444 | return -EPROTO; | 
|---|
| 6445 |  | 
|---|
| 6446 | err = skb_cow_head(skb, sizeof(*eth)); | 
|---|
| 6447 | if (err < 0) | 
|---|
| 6448 | return err; | 
|---|
| 6449 |  | 
|---|
| 6450 | skb_push(skb, sizeof(*eth)); | 
|---|
| 6451 | skb_reset_mac_header(skb); | 
|---|
| 6452 | skb_reset_mac_len(skb); | 
|---|
| 6453 |  | 
|---|
| 6454 | eth = eth_hdr(skb); | 
|---|
| 6455 | ether_addr_copy(eth->h_dest, dst); | 
|---|
| 6456 | ether_addr_copy(eth->h_source, src); | 
|---|
| 6457 | eth->h_proto = skb->protocol; | 
|---|
| 6458 |  | 
|---|
| 6459 | skb_postpush_rcsum(skb, start: eth, len: sizeof(*eth)); | 
|---|
| 6460 |  | 
|---|
| 6461 | return 0; | 
|---|
| 6462 | } | 
|---|
| 6463 | EXPORT_SYMBOL(skb_eth_push); | 
|---|
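|  |  | 
|---|
|  | /* Illustrative sketch, modeled on Open vSwitch push_eth(): prepend a header | 
|---|
|  |  * to an L3 packet ("daddr" and "saddr" are hypothetical caller addresses): | 
|---|
|  |  * | 
|---|
|  |  *	err = skb_eth_push(skb, daddr, saddr); | 
|---|
|  |  *	if (unlikely(err)) | 
|---|
|  |  *		return err; | 
|---|
|  |  */ | 
|---|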
| 6464 |  | 
|---|
| 6465 | /* Update the ethertype of hdr and the skb csum value if required. */ | 
|---|
| 6466 | static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, | 
|---|
| 6467 | __be16 ethertype) | 
|---|
| 6468 | { | 
|---|
| 6469 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | 
|---|
| 6470 | __be16 diff[] = { ~hdr->h_proto, ethertype }; | 
|---|
| 6471 |  | 
|---|
| 6472 | skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); | 
|---|
| 6473 | } | 
|---|
| 6474 |  | 
|---|
| 6475 | hdr->h_proto = ethertype; | 
|---|
| 6476 | } | 
|---|
| 6477 |  | 
|---|
| 6478 | /** | 
|---|
| 6479 | * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of | 
|---|
| 6480 | *                   the packet | 
|---|
| 6481 | * | 
|---|
| 6482 | * @skb: buffer | 
|---|
| 6483 | * @mpls_lse: MPLS label stack entry to push | 
|---|
| 6484 | * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) | 
|---|
| 6485 | * @mac_len: length of the MAC header | 
|---|
| 6486 | * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is | 
|---|
| 6487 | *            ethernet | 
|---|
| 6488 | * | 
|---|
| 6489 | * Expects skb->data at mac header. | 
|---|
| 6490 | * | 
|---|
| 6491 | * Returns 0 on success, -errno otherwise. | 
|---|
| 6492 | */ | 
|---|
| 6493 | int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, | 
|---|
| 6494 | int mac_len, bool ethernet) | 
|---|
| 6495 | { | 
|---|
| 6496 | struct mpls_shim_hdr *lse; | 
|---|
| 6497 | int err; | 
|---|
| 6498 |  | 
|---|
| 6499 | if (unlikely(!eth_p_mpls(mpls_proto))) | 
|---|
| 6500 | return -EINVAL; | 
|---|
| 6501 |  | 
|---|
| 6502 | /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */ | 
|---|
| 6503 | if (skb->encapsulation) | 
|---|
| 6504 | return -EINVAL; | 
|---|
| 6505 |  | 
|---|
| 6506 | err = skb_cow_head(skb, MPLS_HLEN); | 
|---|
| 6507 | if (unlikely(err)) | 
|---|
| 6508 | return err; | 
|---|
| 6509 |  | 
|---|
| 6510 | if (!skb->inner_protocol) { | 
|---|
| 6511 | skb_set_inner_network_header(skb, skb_network_offset(skb)); | 
|---|
| 6512 | skb_set_inner_protocol(skb, skb->protocol); | 
|---|
| 6513 | } | 
|---|
| 6514 |  | 
|---|
| 6515 | skb_push(skb, MPLS_HLEN); | 
|---|
| 6516 | memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), | 
|---|
| 6517 | mac_len); | 
|---|
| 6518 | skb_reset_mac_header(skb); | 
|---|
| 6519 | skb_set_network_header(skb, mac_len); | 
|---|
| 6520 | skb_reset_mac_len(skb); | 
|---|
| 6521 |  | 
|---|
| 6522 | lse = mpls_hdr(skb); | 
|---|
| 6523 | lse->label_stack_entry = mpls_lse; | 
|---|
| 6524 | skb_postpush_rcsum(skb, lse, MPLS_HLEN); | 
|---|
| 6525 |  | 
|---|
| 6526 | if (ethernet && mac_len >= ETH_HLEN) | 
|---|
| 6527 | skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); | 
|---|
| 6528 | skb->protocol = mpls_proto; | 
|---|
| 6529 |  | 
|---|
| 6530 | return 0; | 
|---|
| 6531 | } | 
|---|
| 6532 | EXPORT_SYMBOL_GPL(skb_mpls_push); | 
|---|
| 6533 |  | 
|---|
| 6534 | /** | 
|---|
| 6535 | * skb_mpls_pop() - pop the outermost MPLS header | 
|---|
| 6536 | * | 
|---|
| 6537 | * @skb: buffer | 
|---|
| 6538 | * @next_proto: ethertype of header after popped MPLS header | 
|---|
| 6539 | * @mac_len: length of the MAC header | 
|---|
| 6540 | * @ethernet: flag to indicate if the packet is ethernet | 
|---|
| 6541 | * | 
|---|
| 6542 | * Expects skb->data at mac header. | 
|---|
| 6543 | * | 
|---|
| 6544 | * Returns 0 on success, -errno otherwise. | 
|---|
| 6545 | */ | 
|---|
| 6546 | int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, | 
|---|
| 6547 | bool ethernet) | 
|---|
| 6548 | { | 
|---|
| 6549 | int err; | 
|---|
| 6550 |  | 
|---|
| 6551 | if (unlikely(!eth_p_mpls(skb->protocol))) | 
|---|
| 6552 | return 0; | 
|---|
| 6553 |  | 
|---|
| 6554 | err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); | 
|---|
| 6555 | if (unlikely(err)) | 
|---|
| 6556 | return err; | 
|---|
| 6557 |  | 
|---|
| 6558 | skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); | 
|---|
| 6559 | memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), | 
|---|
| 6560 | mac_len); | 
|---|
| 6561 |  | 
|---|
| 6562 | __skb_pull(skb, MPLS_HLEN); | 
|---|
| 6563 | skb_reset_mac_header(skb); | 
|---|
| 6564 | skb_set_network_header(skb, mac_len); | 
|---|
| 6565 |  | 
|---|
| 6566 | if (ethernet && mac_len >= ETH_HLEN) { | 
|---|
| 6567 | struct ethhdr *hdr; | 
|---|
| 6568 |  | 
|---|
| 6569 | /* use mpls_hdr() to get ethertype to account for VLANs. */ | 
|---|
| 6570 | hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); | 
|---|
| 6571 | skb_mod_eth_type(skb, hdr, next_proto); | 
|---|
| 6572 | } | 
|---|
| 6573 | skb->protocol = next_proto; | 
|---|
| 6574 |  | 
|---|
| 6575 | return 0; | 
|---|
| 6576 | } | 
|---|
| 6577 | EXPORT_SYMBOL_GPL(skb_mpls_pop); | 
|---|
| 6578 |  | 
|---|
| 6579 | /** | 
|---|
| 6580 | * skb_mpls_update_lse() - modify outermost MPLS header and update csum | 
|---|
| 6581 | * | 
|---|
| 6582 | * @skb: buffer | 
|---|
| 6583 | * @mpls_lse: new MPLS label stack entry to update to | 
|---|
| 6584 | * | 
|---|
| 6585 | * Expects skb->data at mac header. | 
|---|
| 6586 | * | 
|---|
| 6587 | * Returns 0 on success, -errno otherwise. | 
|---|
| 6588 | */ | 
|---|
| 6589 | int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) | 
|---|
| 6590 | { | 
|---|
| 6591 | int err; | 
|---|
| 6592 |  | 
|---|
| 6593 | if (unlikely(!eth_p_mpls(skb->protocol))) | 
|---|
| 6594 | return -EINVAL; | 
|---|
| 6595 |  | 
|---|
| 6596 | err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); | 
|---|
| 6597 | if (unlikely(err)) | 
|---|
| 6598 | return err; | 
|---|
| 6599 |  | 
|---|
| 6600 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | 
|---|
| 6601 | __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; | 
|---|
| 6602 |  | 
|---|
| 6603 | skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); | 
|---|
| 6604 | } | 
|---|
| 6605 |  | 
|---|
| 6606 | mpls_hdr(skb)->label_stack_entry = mpls_lse; | 
|---|
| 6607 |  | 
|---|
| 6608 | return 0; | 
|---|
| 6609 | } | 
|---|
| 6610 | EXPORT_SYMBOL_GPL(skb_mpls_update_lse); | 
|---|
| 6611 |  | 
|---|
| 6612 | /** | 
|---|
| 6613 | * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header | 
|---|
| 6614 | * | 
|---|
| 6615 | * @skb: buffer | 
|---|
| 6616 | * | 
|---|
| 6617 | * Expects skb->data at mac header. | 
|---|
| 6618 | * | 
|---|
| 6619 | * Returns 0 on success, -errno otherwise. | 
|---|
| 6620 | */ | 
|---|
| 6621 | int skb_mpls_dec_ttl(struct sk_buff *skb) | 
|---|
| 6622 | { | 
|---|
| 6623 | u32 lse; | 
|---|
| 6624 | u8 ttl; | 
|---|
| 6625 |  | 
|---|
| 6626 | if (unlikely(!eth_p_mpls(skb->protocol))) | 
|---|
| 6627 | return -EINVAL; | 
|---|
| 6628 |  | 
|---|
| 6629 | if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) | 
|---|
| 6630 | return -ENOMEM; | 
|---|
| 6631 |  | 
|---|
| 6632 | lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); | 
|---|
| 6633 | ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; | 
|---|
| 6634 | if (!--ttl) | 
|---|
| 6635 | return -EINVAL; | 
|---|
| 6636 |  | 
|---|
| 6637 | lse &= ~MPLS_LS_TTL_MASK; | 
|---|
| 6638 | lse |= ttl << MPLS_LS_TTL_SHIFT; | 
|---|
| 6639 |  | 
|---|
| 6640 | return skb_mpls_update_lse(skb, cpu_to_be32(lse)); | 
|---|
| 6641 | } | 
|---|
| 6642 | EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); | 
|---|
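|  |  | 
|---|
|  | /* Illustrative sketch of an MPLS forwarding-style sequence ("new_lse" is a | 
|---|
|  |  * hypothetical rewritten label stack entry): | 
|---|
|  |  * | 
|---|
|  |  *	err = skb_mpls_dec_ttl(skb); | 
|---|
|  |  *	if (!err) | 
|---|
|  |  *		err = skb_mpls_update_lse(skb, new_lse); | 
|---|
|  |  */ | 
|---|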
| 6643 |  | 
|---|
| 6644 | /** | 
|---|
| 6645 | * alloc_skb_with_frags - allocate skb with page frags | 
|---|
| 6646 | * | 
|---|
| 6647 | * @header_len: size of linear part | 
|---|
| 6648 | * @data_len: needed length in frags | 
|---|
| 6649 | * @order: max page order desired. | 
|---|
| 6650 | * @errcode: pointer to error code if any | 
|---|
| 6651 | * @gfp_mask: allocation mask | 
|---|
| 6652 | * | 
|---|
| 6653 | * This can be used to allocate a paged skb, given a maximal order for frags. | 
|---|
| 6654 | */ | 
|---|
| 6655 | struct sk_buff *alloc_skb_with_frags(unsigned long header_len, | 
|---|
| 6656 | unsigned long data_len, | 
|---|
| 6657 | int order, | 
|---|
| 6658 | int *errcode, | 
|---|
| 6659 | gfp_t gfp_mask) | 
|---|
| 6660 | { | 
|---|
| 6661 | unsigned long chunk; | 
|---|
| 6662 | struct sk_buff *skb; | 
|---|
| 6663 | struct page *page; | 
|---|
| 6664 | int nr_frags = 0; | 
|---|
| 6665 |  | 
|---|
| 6666 | *errcode = -EMSGSIZE; | 
|---|
| 6667 | if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order))) | 
|---|
| 6668 | return NULL; | 
|---|
| 6669 |  | 
|---|
| 6670 | *errcode = -ENOBUFS; | 
|---|
| 6671 | skb = alloc_skb(header_len, gfp_mask); | 
|---|
| 6672 | if (!skb) | 
|---|
| 6673 | return NULL; | 
|---|
| 6674 |  | 
|---|
| 6675 | while (data_len) { | 
|---|
| 6676 | if (nr_frags == MAX_SKB_FRAGS) | 
|---|
| 6677 | goto failure; | 
|---|
| 6678 | while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order)) | 
|---|
| 6679 | order--; | 
|---|
| 6680 |  | 
|---|
| 6681 | if (order) { | 
|---|
| 6682 | page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | | 
|---|
| 6683 | __GFP_COMP | | 
|---|
| 6684 | __GFP_NOWARN, | 
|---|
| 6685 | order); | 
|---|
| 6686 | if (!page) { | 
|---|
| 6687 | order--; | 
|---|
| 6688 | continue; | 
|---|
| 6689 | } | 
|---|
| 6690 | } else { | 
|---|
| 6691 | page = alloc_page(gfp_mask); | 
|---|
| 6692 | if (!page) | 
|---|
| 6693 | goto failure; | 
|---|
| 6694 | } | 
|---|
| 6695 | chunk = min_t(unsigned long, data_len, | 
|---|
| 6696 | PAGE_SIZE << order); | 
|---|
| 6697 | skb_fill_page_desc(skb, i: nr_frags, page, off: 0, size: chunk); | 
|---|
| 6698 | nr_frags++; | 
|---|
| 6699 | skb->truesize += (PAGE_SIZE << order); | 
|---|
| 6700 | data_len -= chunk; | 
|---|
| 6701 | } | 
|---|
| 6702 | return skb; | 
|---|
| 6703 |  | 
|---|
| 6704 | failure: | 
|---|
| 6705 | kfree_skb(skb); | 
|---|
| 6706 | return NULL; | 
|---|
| 6707 | } | 
|---|
| 6708 | EXPORT_SYMBOL(alloc_skb_with_frags); | 
|---|
| 6709 |  | 
|---|
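/* A minimal caller sketch, patterned after sock_alloc_send_pskb(): a
 * short linear header plus the payload in high-order frags, with the
 * function itself falling back toward order-0 pages under memory
 * pressure.  "payload" is a hypothetical length; the error codes below
 * are the two set by alloc_skb_with_frags() itself.
 *
 *	int err;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(128, payload, PAGE_ALLOC_COSTLY_ORDER,
 *				   &err, GFP_KERNEL);
 *	if (!skb)
 *		return ERR_PTR(err);	// -EMSGSIZE or -ENOBUFS
 */
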
/* carve out the first off bytes from skb when off < headlen */
static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
				    const int headlen, gfp_t gfp_mask)
{
	int i;
	unsigned int size = skb_end_offset(skb);
	int new_hlen = headlen - off;
	u8 *data;

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;
	size = SKB_WITH_OVERHEAD(size);

	/* Copy real data, and all frags */
	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
	skb->len -= off;

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info,
			frags[skb_shinfo(skb)->nr_frags]));
	if (skb_cloned(skb)) {
		/* drop the old head gracefully */
		if (skb_orphan_frags(skb, gfp_mask)) {
			skb_kfree_head(data, size);
			return -ENOMEM;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);
		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);
		skb_release_data(skb, SKB_CONSUMED);
	} else {
		/* we can reuse the existing refcount - all we did was
		 * relocate values
		 */
		skb_free_head(skb);
	}

	skb->head = data;
	skb->data = data;
	skb->head_frag = 0;
	skb_set_end_offset(skb, size);
	skb_set_tail_pointer(skb, skb_headlen(skb));
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	return 0;
}

static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);

/* carve out the first eat bytes from skb's frag_list. May recurse into
 * pskb_carve()
 */
static int pskb_carve_frag_list(struct skb_shared_info *shinfo, int eat,
				gfp_t gfp_mask)
{
	struct sk_buff *list = shinfo->frag_list;
	struct sk_buff *clone = NULL;
	struct sk_buff *insp = NULL;

	do {
		if (!list) {
			pr_err("Not enough bytes to eat. Want %d\n", eat);
			return -EFAULT;
		}
		if (list->len <= eat) {
			/* Eaten as whole. */
			eat -= list->len;
			list = list->next;
			insp = list;
		} else {
			/* Eaten partially. */
			if (skb_shared(list)) {
				clone = skb_clone(list, gfp_mask);
				if (!clone)
					return -ENOMEM;
				insp = list->next;
				list = clone;
			} else {
				/* This may be pulled without problems. */
				insp = list;
			}
			if (pskb_carve(list, eat, gfp_mask) < 0) {
				kfree_skb(clone);
				return -ENOMEM;
			}
			break;
		}
	} while (eat);

	/* Free pulled out fragments. */
	while ((list = shinfo->frag_list) != insp) {
		shinfo->frag_list = list->next;
		consume_skb(list);
	}
	/* And insert new clone at head. */
	if (clone) {
		clone->next = list;
		shinfo->frag_list = clone;
	}
	return 0;
}

/* carve off first len bytes from skb. Split line (off) is in the
 * non-linear part of skb
 */
static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
				       int pos, gfp_t gfp_mask)
{
	int i, k = 0;
	unsigned int size = skb_end_offset(skb);
	u8 *data;
	const int nfrags = skb_shinfo(skb)->nr_frags;
	struct skb_shared_info *shinfo;

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;
	size = SKB_WITH_OVERHEAD(size);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
	if (skb_orphan_frags(skb, gfp_mask)) {
		skb_kfree_head(data, size);
		return -ENOMEM;
	}
	shinfo = (struct skb_shared_info *)(data + size);
	for (i = 0; i < nfrags; i++) {
		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + fsize > off) {
			shinfo->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < off) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split the frag accurately, which is
				 *    what we do here.
				 */
				skb_frag_off_add(&shinfo->frags[0], off - pos);
				skb_frag_size_sub(&shinfo->frags[0], off - pos);
			}
			skb_frag_ref(skb, i);
			k++;
		}
		pos += fsize;
	}
	shinfo->nr_frags = k;
	if (skb_has_frag_list(skb))
		skb_clone_fraglist(skb);

	/* split line is in frag list */
	if (k == 0 && pskb_carve_frag_list(shinfo, off - pos, gfp_mask)) {
		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
		if (skb_has_frag_list(skb))
			kfree_skb_list(skb_shinfo(skb)->frag_list);
		skb_kfree_head(data, size);
		return -ENOMEM;
	}
	skb_release_data(skb, SKB_CONSUMED);

	skb->head = data;
	skb->head_frag = 0;
	skb->data = data;
	skb_set_end_offset(skb, size);
	skb_reset_tail_pointer(skb);
	skb_headers_offset_update(skb, 0);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	skb->len -= off;
	skb->data_len = skb->len;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;
}

/* remove len bytes from the beginning of the skb */
static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
{
	int headlen = skb_headlen(skb);

	if (len < headlen)
		return pskb_carve_inside_header(skb, len, headlen, gfp);
	else
		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
}

/* Extract to_copy bytes starting at off from skb, and return this in
 * a new skb
 */
struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
			     int to_copy, gfp_t gfp)
{
	struct sk_buff *clone = skb_clone(skb, gfp);

	if (!clone)
		return NULL;

	if (pskb_carve(clone, off, gfp) < 0 ||
	    pskb_trim(clone, to_copy)) {
		kfree_skb(clone);
		return NULL;
	}
	return clone;
}
EXPORT_SYMBOL(pskb_extract);

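/* Usage sketch (hypothetical offsets): pull a sub-record out of a large
 * receive skb without copying payload bytes; the original skb is left
 * untouched because the carve/trim operate on a clone.
 *
 *	struct sk_buff *frag_skb;
 *
 *	frag_skb = pskb_extract(skb, hdr_len, rec_len, GFP_ATOMIC);
 *	if (!frag_skb)
 *		return -ENOMEM;
 */
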
/**
 * skb_condense - try to get rid of fragments/frag_list if possible
 * @skb: buffer
 *
 * Can be used to save memory before skb is added to a busy queue.
 * If packet has bytes in frags and enough tail room in skb->head,
 * pull all of them, so that we can free the frags right now and adjust
 * truesize.
 * Notes:
 *	We do not reallocate skb->head, thus this cannot fail.
 *	Caller must re-evaluate skb->truesize if needed.
 */
void skb_condense(struct sk_buff *skb)
{
	if (skb->data_len) {
		if (skb->data_len > skb->end - skb->tail ||
		    skb_cloned(skb) || !skb_frags_readable(skb))
			return;

		/* Nice, we can free page frag(s) right now */
		__pskb_pull_tail(skb, skb->data_len);
	}
	/* At this point, skb->truesize might be over estimated,
	 * because skb had a fragment, and fragments do not tell
	 * their truesize.
	 * When we pulled its content into skb->head, fragment
	 * was freed, but __pskb_pull_tail() could not possibly
	 * adjust skb->truesize, not knowing the frag truesize.
	 */
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
EXPORT_SYMBOL(skb_condense);

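/* Usage sketch, modelled on the TCP receive path: condense before
 * queueing, then re-account the (possibly reduced) truesize against the
 * socket, since skb_condense() does not touch memory accounting itself.
 *
 *	skb_condense(skb);
 *	skb_set_owner_r(skb, sk);	// charges skb->truesize to sk
 *	__skb_queue_tail(&sk->sk_receive_queue, skb);
 */
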
#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
{
	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
}

/**
 * __skb_ext_alloc - allocate a new skb extensions storage
 *
 * @flags: See kmalloc().
 *
 * Returns the newly allocated pointer. The pointer can later be
 * attached to a skb via __skb_ext_set().
 * Note: caller must handle the skb_ext as an opaque data.
 */
struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);

	if (new) {
		memset(new->offset, 0, sizeof(new->offset));
		refcount_set(&new->refcnt, 1);
	}

	return new;
}

static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
					 unsigned int old_active)
{
	struct skb_ext *new;

	if (refcount_read(&old->refcnt) == 1)
		return old;

	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
	if (!new)
		return NULL;

	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
	refcount_set(&new->refcnt, 1);

#ifdef CONFIG_XFRM
	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
		unsigned int i;

		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
#endif
#ifdef CONFIG_MCTP_FLOWS
	if (old_active & (1 << SKB_EXT_MCTP)) {
		struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);

		if (flow->key)
			refcount_inc(&flow->key->refs);
	}
#endif
	__skb_ext_put(old);
	return new;
}

/**
 * __skb_ext_set - attach the specified extension storage to this skb
 * @skb: buffer
 * @id: extension id
 * @ext: extension storage previously allocated via __skb_ext_alloc()
 *
 * Existing extensions, if any, are cleared.
 *
 * Returns the pointer to the extension.
 */
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext)
{
	unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);

	skb_ext_put(skb);
	newlen = newoff + skb_ext_type_len[id];
	ext->chunks = newlen;
	ext->offset[id] = newoff;
	skb->extensions = ext;
	skb->active_extensions = 1 << id;
	return skb_ext_get_ptr(ext, id);
}
EXPORT_SYMBOL_NS_GPL(__skb_ext_set, "NETDEV_INTERNAL");

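/* Usage sketch: pre-allocate the extension storage in a context where
 * sleeping allocations are allowed, then attach it later (e.g. from a
 * path that must not sleep).  SKB_EXT_SEC_PATH is just one possible id.
 *
 *	struct skb_ext *ext = __skb_ext_alloc(GFP_KERNEL);
 *	struct sec_path *sp;
 *
 *	if (!ext)
 *		return -ENOMEM;
 *	...
 *	sp = __skb_ext_set(skb, SKB_EXT_SEC_PATH, ext);
 */
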
/**
 * skb_ext_add - allocate space for given extension, COW if needed
 * @skb: buffer
 * @id: extension to allocate space for
 *
 * Allocates enough space for the given extension.
 * If the extension is already present, a pointer to that extension
 * is returned.
 *
 * If the skb was cloned, COW applies and the returned memory can be
 * modified without changing the extension space of cloned buffers.
 *
 * Returns pointer to the extension or NULL on allocation failure.
 */
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *new, *old = NULL;
	unsigned int newlen, newoff;

	if (skb->active_extensions) {
		old = skb->extensions;

		new = skb_ext_maybe_cow(old, skb->active_extensions);
		if (!new)
			return NULL;

		if (__skb_ext_exist(new, id))
			goto set_active;

		newoff = new->chunks;
	} else {
		newoff = SKB_EXT_CHUNKSIZEOF(*new);

		new = __skb_ext_alloc(GFP_ATOMIC);
		if (!new)
			return NULL;
	}

	newlen = newoff + skb_ext_type_len[id];
	new->chunks = newlen;
	new->offset[id] = newoff;
set_active:
	skb->slow_gro = 1;
	skb->extensions = new;
	skb->active_extensions |= 1 << id;
	return skb_ext_get_ptr(new, id);
}
EXPORT_SYMBOL(skb_ext_add);

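/* Typical caller pattern, sketched from how the MPTCP code attaches its
 * extension: add (or find) the extension, then fill in type-specific
 * fields through the returned pointer.  The error handling is
 * hypothetical and depends on the caller.
 *
 *	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
 *
 *	if (!mpext)
 *		return -ENOMEM;
 *	memset(mpext, 0, sizeof(*mpext));
 */
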
#ifdef CONFIG_XFRM
static void skb_ext_put_sp(struct sec_path *sp)
{
	unsigned int i;

	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
}
#endif

#ifdef CONFIG_MCTP_FLOWS
static void skb_ext_put_mctp(struct mctp_flow *flow)
{
	if (flow->key)
		mctp_key_unref(flow->key);
}
#endif

void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *ext = skb->extensions;

	skb->active_extensions &= ~(1 << id);
	if (skb->active_extensions == 0) {
		skb->extensions = NULL;
		__skb_ext_put(ext);
#ifdef CONFIG_XFRM
	} else if (id == SKB_EXT_SEC_PATH &&
		   refcount_read(&ext->refcnt) == 1) {
		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);

		skb_ext_put_sp(sp);
		sp->len = 0;
#endif
	}
}
EXPORT_SYMBOL(__skb_ext_del);

void __skb_ext_put(struct skb_ext *ext)
{
	/* If this is last clone, nothing can increment
	 * it after check passes.  Avoids one atomic op.
	 */
	if (refcount_read(&ext->refcnt) == 1)
		goto free_now;

	if (!refcount_dec_and_test(&ext->refcnt))
		return;
free_now:
#ifdef CONFIG_XFRM
	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
#endif
#ifdef CONFIG_MCTP_FLOWS
	if (__skb_ext_exist(ext, SKB_EXT_MCTP))
		skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP));
#endif

	kmem_cache_free(skbuff_ext_cache, ext);
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */

static void kfree_skb_napi_cache(struct sk_buff *skb)
{
	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	local_bh_disable();
	__napi_kfree_skb(skb, SKB_CONSUMED);
	local_bh_enable();
}

/**
 * skb_attempt_defer_free - queue skb for remote freeing
 * @skb: buffer
 *
 * Put @skb in a per-cpu list, using the cpu which
 * allocated the skb/pages to reduce false sharing
 * and memory zone spinlock contention.
 */
void skb_attempt_defer_free(struct sk_buff *skb)
{
	struct skb_defer_node *sdn;
	unsigned long defer_count;
	int cpu = skb->alloc_cpu;
	unsigned int defer_max;
	bool kick;

	if (cpu == raw_smp_processor_id() ||
	    WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
	    !cpu_online(cpu)) {
nodefer:	kfree_skb_napi_cache(skb);
		return;
	}

	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(skb->destructor);

	sdn = per_cpu_ptr(net_hotdata.skb_defer_nodes, cpu) + numa_node_id();

	defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max);
	defer_count = atomic_long_inc_return(&sdn->defer_count);

	if (defer_count >= defer_max)
		goto nodefer;

	llist_add(&skb->ll_node, &sdn->defer_list);

	/* Send an IPI every time queue reaches half capacity. */
	kick = (defer_count - 1) == (defer_max >> 1);

	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
	 * if we are unlucky enough (this seems very unlikely).
	 */
	if (unlikely(kick))
		kick_defer_list_purge(cpu);
}

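/* Usage sketch, patterned on the TCP recvmsg() path: hand skbs that
 * were consumed on this cpu back to the cpu that allocated them.  The
 * caller must already have dropped the dst and cleared the destructor,
 * per the DEBUG_NET_WARN_ON_ONCE() checks above.
 *
 *	skb_dst_drop(skb);
 *	skb->destructor = NULL;	// assumed already uncharged by the caller
 *	skb_attempt_defer_free(skb);
 */
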
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
				 size_t offset, size_t len)
{
	const char *kaddr;
	__wsum csum;

	kaddr = kmap_local_page(page);
	csum = csum_partial(kaddr + offset, len, 0);
	kunmap_local(kaddr);
	skb->csum = csum_block_add(skb->csum, csum, skb->len);
}

/**
 * skb_splice_from_iter - Splice (or copy) pages to skbuff
 * @skb: The buffer to add pages to
 * @iter: Iterator representing the pages to be added
 * @maxsize: Maximum amount of data to be added
 *
 * This is a common helper function for supporting MSG_SPLICE_PAGES.  It
 * extracts pages from an iterator and adds them to the socket buffer if
 * possible, copying them to fragments if not possible (such as if they're slab
 * pages).
 *
 * Returns the amount of data spliced/copied or -EMSGSIZE if there's
 * insufficient space in the buffer to transfer anything.
 */
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
			     ssize_t maxsize)
{
	size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags);
	struct page *pages[8], **ppages = pages;
	ssize_t spliced = 0, ret = 0;
	unsigned int i;

	while (iter->count > 0) {
		ssize_t space, nr, len;
		size_t off;

		ret = -EMSGSIZE;
		space = frag_limit - skb_shinfo(skb)->nr_frags;
		if (space < 0)
			break;

		/* We might be able to coalesce without increasing nr_frags */
		nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages));

		len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off);
		if (len <= 0) {
			ret = len ?: -EIO;
			break;
		}

		i = 0;
		do {
			struct page *page = pages[i++];
			size_t part = min_t(size_t, PAGE_SIZE - off, len);

			ret = -EIO;
			if (WARN_ON_ONCE(!sendpage_ok(page)))
				goto out;

			ret = skb_append_pagefrags(skb, page, off, part,
						   frag_limit);
			if (ret < 0) {
				iov_iter_revert(iter, len);
				goto out;
			}

			if (skb->ip_summed == CHECKSUM_NONE)
				skb_splice_csum_page(skb, page, off, part);

			off = 0;
			spliced += part;
			maxsize -= part;
			len -= part;
		} while (len > 0);

		if (maxsize <= 0)
			break;
	}

out:
	skb_len_add(skb, spliced);
	return spliced ?: ret;
}
EXPORT_SYMBOL(skb_splice_from_iter);

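/* Caller sketch, following the pattern used by sendmsg() handlers that
 * honour MSG_SPLICE_PAGES (e.g. TCP's): splice as much of the message
 * as fits, then account the spliced bytes.  "copy" is a hypothetical
 * per-skb byte budget chosen by the caller.
 *
 *	err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
 *	if (err < 0)
 *		goto error;	// -EMSGSIZE or -EIO
 *	copy = err;
 */
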
static __always_inline
size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
			     size_t len, void *to, void *priv2)
{
	__wsum *csum = priv2;
	__wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len);

	*csum = csum_block_add(*csum, next, progress);
	return 0;
}

static __always_inline
size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
				size_t len, void *to, void *priv2)
{
	__wsum next, *csum = priv2;

	next = csum_and_copy_from_user(iter_from, to + progress, len);
	*csum = csum_block_add(*csum, next, progress);
	return next ? 0 : len;
}

bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied;

	if (WARN_ON_ONCE(!i->data_source))
		return false;
	copied = iterate_and_advance2(i, bytes, addr, csum,
				      copy_from_user_iter_csum,
				      memcpy_from_iter_csum);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

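/* Usage sketch: copy user data into an skb while folding it into a
 * running checksum, in the style of this helper's callers in the
 * datagram code.  Tailroom for skb_put() is assumed to have been
 * reserved by the caller, and the data is assumed to be appended at
 * the current tail (hence the skb->len - bytes block offset).
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(skb_put(skb, bytes), bytes,
 *					  &csum, &msg->msg_iter))
 *		return -EFAULT;
 *	skb->csum = csum_block_add(skb->csum, csum, skb->len - bytes);
 */
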
void get_netmem(netmem_ref netmem)
{
	struct net_iov *niov;

	if (netmem_is_net_iov(netmem)) {
		niov = netmem_to_net_iov(netmem);
		if (net_is_devmem_iov(niov))
			net_devmem_get_net_iov(netmem_to_net_iov(netmem));
		return;
	}
	get_page(netmem_to_page(netmem));
}
EXPORT_SYMBOL(get_netmem);

void put_netmem(netmem_ref netmem)
{
	struct net_iov *niov;

	if (netmem_is_net_iov(netmem)) {
		niov = netmem_to_net_iov(netmem);
		if (net_is_devmem_iov(niov))
			net_devmem_put_net_iov(netmem_to_net_iov(netmem));
		return;
	}

	put_page(netmem_to_page(netmem));
}
EXPORT_SYMBOL(put_netmem);

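/* Usage sketch: take and drop a reference on a frag regardless of
 * whether it is backed by a regular page or by a net_iov (devmem),
 * which is the case these helpers exist to hide from callers.
 *
 *	netmem_ref netmem = skb_frag_netmem(&skb_shinfo(skb)->frags[0]);
 *
 *	get_netmem(netmem);
 *	...
 *	put_netmem(netmem);
 */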