// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

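/* Propagate the tx timestamp request from the original GSO skb to the
 * segment covering the sequence number saved in tskey, so exactly one
 * segment carries the SKBTX_ANY_TSTAMP flags.
 */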
static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
			   unsigned int seq, unsigned int mss)
{
	u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP;
	u32 ts_seq = skb_shinfo(gso_skb)->tskey;

	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= flags;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

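/* Incrementally rewrite one address/port pair of a segment, patching
 * the TCP checksum (which covers the pseudo header) and the IPv4
 * header checksum in place instead of recomputing them.
 */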
static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
				     __be32 *oldip, __be32 newip,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;
	struct iphdr *iph;

	if (*oldip == newip && *oldport == newport)
		return;

	th = tcp_hdr(seg);
	iph = ip_hdr(seg);

	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;

	csum_replace4(&iph->check, *oldip, newip);
	*oldip = newip;
}

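/* Fraglist segments share the head skb's headers.  If the head's
 * addresses or ports were rewritten while aggregated (e.g. by NAT),
 * propagate the new 4-tuple to every trailing segment; the early
 * return handles the common case where nothing changed.
 */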
static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct iphdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct iphdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ip_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ip_hdr(seg->next);

	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ip_hdr(seg);

		__tcpv4_gso_segment_csum(seg,
					 &iph2->saddr, iph->saddr,
					 &th2->source, th->source);
		__tcpv4_gso_segment_csum(seg,
					 &iph2->daddr, iph->daddr,
					 &th2->dest, th->dest);
	}

	return segs;
}

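/* Undo fraglist GRO: split the skb back into its original segment
 * list, then fix up headers and checksums if the head was modified.
 */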
static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
					       netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv4_gso_segment_list_csum(skb);
}

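/* IPv4 GSO entry point.  A fraglist skb whose head still holds exactly
 * one gso_size worth of payload can be split cheaply with
 * skb_segment_list(); anything else goes through the generic
 * tcp_gso_segment() path below.
 */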
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
		struct tcphdr *th = tcp_hdr(skb);

		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
			return __tcp4_gso_segment_list(skb, features);

		skb->ip_summed = CHECKSUM_NONE;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo header; the stack is usually
		 * expected to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

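/* Protocol-independent TCP segmentation: split a GSO skb into mss-sized
 * segments, fixing up sequence numbers, flags, checksums, timestamps
 * and socket accounting for each one.
 */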
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	bool ecn_cwr_mask;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

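	/* TCP checksums are one's complement sums, so the pseudo header
	 * length change can be applied incrementally: save ~len before
	 * pulling the header, and later fold in htonl(~old_len + new_len),
	 * where new_len is thlen + mss for full-sized segments.
	 */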
	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source; reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only require splitting
	 * the frame into an MSS multiple and possibly a remainder; both
	 * cases return a GSO skb, so update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP))
		tcp_gso_tstamp(segs, gso_skb, seq, mss);

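	/* All but the last segment carry thlen + mss checksummed bytes, so
	 * one checksum, derived from the original header plus the length
	 * delta, can be stamped on every one of them.
	 */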
	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	ecn_cwr_mask = !!(skb_shinfo(gso_skb)->gso_type & SKB_GSO_TCP_ACCECN);

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);

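		/* Classic ECN allows CWR only on the first segment; with
		 * AccECN the bit is part of the ACE field and must be kept
		 * on every segment.
		 */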
		th->cwr &= ecn_cwr_mask;
	}

	/* The following lets TCP Small Queues work well with GSO: the
	 * callback into the TCP stack runs when the last frag is freed at
	 * TX completion, not right now when gso_skb is freed by the GSO
	 * engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative, so we
		 * must use either refcount_add() or refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

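/* Look up a held GRO packet of the same flow, comparing source and
 * destination ports as a single 32-bit word.  Entries that stop
 * matching get same_flow cleared so GRO skips them from now on.
 */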
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
{
	struct tcphdr *th2;
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		return p;
	}

	return NULL;
}

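/* Ensure the complete TCP header, options included, is directly
 * accessible in the GRO header area and advance the GRO offset past
 * it.  Returns NULL on truncated or malformed headers.
 */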
struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
{
	unsigned int thlen, hlen, off;
	struct tcphdr *th;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		return NULL;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		return NULL;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			return NULL;
	}

	skb_gro_pull(skb, thlen);

	return th;
}

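/* Core TCP GRO receive: try to merge skb into a held packet of the same
 * flow.  Differing flags (other than FIN/PSH), ack numbers or TCP
 * options, as well as sequence gaps, force a flush instead.
 */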
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct tcphdr *th)
{
	unsigned int thlen = th->doff * 4;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th2;
	unsigned int len;
	__be32 flags;
	unsigned int mss = 1;
	int flush = 1;
	int i;

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	p = tcp_gro_lookup(head, th);
	if (!p)
		goto out_check_final;

	th2 = tcp_hdr(p);
	flush = (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		 ~(TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	flush |= gro_receive_network_flush(th, th2, p);

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches the prior
	 * packet's mss.  If it is a single frame, do not aggregate it if its
	 * length is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
	flush |= skb_cmp_decrypted(p, skb);

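	/* Fraglist GRO keeps segments intact, so it additionally demands
	 * identical flags and checksum state, and caps a chain at 64
	 * segments.
	 */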
	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
		flush |= (__force int)(flags ^ tcp_flag_word(th2));
		flush |= skb->ip_summed != p->ip_summed;
		flush |= skb->csum_level != p->csum_level;
		flush |= NAPI_GRO_CB(p)->count >= 64;
		skb_set_network_header(skb, skb_gro_receive_network_offset(skb));

		if (flush || skb_gro_receive_list(p, skb))
			mss = 1;

		goto out_check_final;
	}

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if the last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

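/* Finalize a merged GRO packet before it is passed up the stack: anchor
 * the checksum at the TCP header (CHECKSUM_PARTIAL) and record the
 * number of merged segments in gso_segs.
 */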
void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ACCECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

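/* Choose between classic and fraglist GRO for this flow: without an
 * established local socket the packet is presumably being forwarded,
 * where preserving the original segment boundaries in a frag_list is
 * cheaper than coalescing and resegmenting.
 */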
static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
	const struct iphdr *iph;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet_get_iif_sdif(skb, &iif, &sdif);
	iph = skb_gro_network_header(skb);
	net = dev_net_rcu(skb->dev);
	sk = __inet_lookup_established(net, iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       iif, sdif);
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_gen_put(sk);
}

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo))
		goto flush;

	th = tcp_gro_pull_header(skb);
	if (!th)
		goto flush;

	tcp4_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

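/* IPv4 gro_complete: fraglist packets keep their verified per-segment
 * checksums; otherwise restore the pseudo header checksum and set the
 * GSO type so a forwarded packet can be resegmented.
 */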
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID << 1 != SKB_GSO_TCP_FIXEDID_INNER);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

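/* Register the TCP/IPv4 GSO/GRO callbacks in the inet offload table. */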
int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	= tcp4_gso_segment,
			.gro_receive	= tcp4_gro_receive,
			.gro_complete	= tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}
506