// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>
#include <trace/events/udp.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>
#include <net/gro.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	int res = udp_lib_init_sock(sk);

	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return res;
}

INDIRECT_CALLABLE_SCOPE
u32 udp6_ehashfn(const struct net *net,
		 const struct in6_addr *laddr,
		 const u16 lport,
		 const struct in6_addr *faddr,
		 const __be16 fport)
{
	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}
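
/* Note on the hash above (a summary, not a normative description): the
 * 4-tuple hash is keyed on per-boot random secrets, so chain placement is
 * unpredictable to remote senders. Only the last 32 bits of the local
 * address feed lhash, while the full foreign address is jhash'ed; the same
 * function later seeds both connected-socket (hash4) placement and lookup.
 */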

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	u16 new_hash4;

	if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
		new_hash4 = udp_ehashfn(sock_net(sk),
					sk->sk_rcv_saddr, sk->sk_num,
					sk->sk_daddr, sk->sk_dport);
	} else {
		new_hash4 = udp6_ehashfn(sock_net(sk),
					 &sk->sk_v6_rcv_saddr, sk->sk_num,
					 &sk->sk_v6_daddr, sk->sk_dport);
	}

	udp_lib_rehash(sk, new_hash, new_hash4);
}

static int compute_score(struct sock *sk, const struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}
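
/* Scoring sketch, derived from the checks above: a socket in the right
 * netns, on the right port and local address, earns one point each for a
 * matching connected peer port, a matching connected peer address, a
 * matching bound device, and RX on its sk_incoming_cpu. E.g. a fully
 * connected, device-bound socket hit on the right CPU scores 4, a plain
 * bound-only socket scores 0, and any hard mismatch short-circuits to -1.
 */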

/**
 * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
 * @net:	Network namespace
 * @saddr:	Source address, network order
 * @sport:	Source port, network order
 * @daddr:	Destination address, network order
 * @hnum:	Destination port, host order
 * @dif:	Destination interface index
 * @sdif:	Destination bridge port index, if relevant
 * @udptable:	Set of UDP hash tables
 *
 * Simplified lookup to be used as fallback if no sockets are found due to a
 * potential race between (receive) address change, and lookup happening before
 * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
 * result sockets, because if we have one, we don't need the fallback at all.
 *
 * Called under rcu_read_lock().
 *
 * Return: socket with highest matching score if any, NULL if none
 */
static struct sock *udp6_lib_lookup1(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum, int dif, int sdif,
				     const struct udp_table *udptable)
{
	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot = &udptable->hash[slot];
	struct sock *sk, *result = NULL;
	int score, badness = 0;

	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net,
				      saddr, sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			result = sk;
			badness = score;
		}
	}

	return result;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(const struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	bool need_rescore;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		need_rescore = false;
rescore:
		score = compute_score(need_rescore ? result : sk, net, saddr,
				      sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (need_rescore)
				continue;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
							saddr, sport, daddr, hnum, udp6_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			/* compute_score is too long of a function to be
			 * inlined, and calling it again here yields
			 * measurable overhead for some
			 * workloads. Work around it by jumping
			 * backwards to rescore 'result'.
			 */
			need_rescore = true;
			goto rescore;
		}
	}
	return result;
}

#if IS_ENABLED(CONFIG_BASE_SMALL)
static struct sock *udp6_lib_lookup4(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum, int dif, int sdif,
				     struct udp_table *udptable)
{
	return NULL;
}

static void udp6_hash4(struct sock *sk)
{
}
#else /* !CONFIG_BASE_SMALL */
static struct sock *udp6_lib_lookup4(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum, int dif, int sdif,
				     struct udp_table *udptable)
{
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	const struct hlist_nulls_node *node;
	struct udp_hslot *hslot4;
	unsigned int hash4, slot;
	struct udp_sock *up;
	struct sock *sk;

	hash4 = udp6_ehashfn(net, daddr, hnum, saddr, sport);
	slot = hash4 & udptable->mask;
	hslot4 = &udptable->hash4[slot];

begin:
	udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
		sk = (struct sock *)up;
		if (inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
			return sk;
	}

	/* if the nulls value we got at the end of this lookup is not the
	 * expected one, we must restart lookup. We probably met an item that
	 * was moved to another chain due to rehash.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	return NULL;
}

static void udp6_hash4(struct sock *sk)
{
	struct net *net = sock_net(sk);
	unsigned int hash;

	if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
		udp4_hash4(sk);
		return;
	}

	if (sk_unhashed(sk) || ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		return;

	hash = udp6_ehashfn(net, &sk->sk_v6_rcv_saddr, sk->sk_num,
			    &sk->sk_v6_daddr, sk->sk_dport);

	udp_lib_hash4(sk, hash);
}
#endif /* CONFIG_BASE_SMALL */

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(const struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	struct udp_hslot *hslot2;
	struct sock *result, *sk;
	unsigned int hash2;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);

	if (udp_has_hash4(hslot2)) {
		result = udp6_lib_lookup4(net, saddr, sport, daddr, hnum,
					  dif, sdif, udptable);
		if (result) /* udp6_lib_lookup4 returns sk or NULL */
			return result;
	}

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
						saddr, sport, daddr, hnum, dif,
						udp6_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result))
		goto done;

	/* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
	result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
				  udptable);

done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
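
/* Lookup order recap, summarizing the function above: the 4-tuple hash for
 * connected sockets when populated, then the {addr, port} secondary hash,
 * then a BPF sk_lookup redirect, then the {wildcard, port} secondary hash,
 * and finally the port-only primary hash to cover a concurrent rehash.
 */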

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet6_get_iif_sdif(skb, &iif, &sdif);

	return __udp6_lib_lookup(net, &iph->saddr, sport,
				 &iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxopt.bits.rxpmtu && READ_ONCE(np->rxpmtu))
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			udp_drops_inc(sk);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);

		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6,
						      addr_len);
	}

	if (udp_test_bit(GRO_ENABLED, sk))
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet_cmsg_flags(inet))
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
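
/* Userspace-visible semantics worth noting (a summary, not new behavior):
 * on truncation MSG_TRUNC is set in msg_flags and, when MSG_TRUNC was also
 * requested, the full datagram length is returned instead of the copied
 * length, so recv(fd, buf, 0, MSG_PEEK | MSG_TRUNC) can size a buffer
 * before the real read. Peeked datagrams stay queued; sk_peek_offset()
 * implements SO_PEEK_OFF.
 */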

DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data + offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     READ_ONCE(sk->sk_mark),
				     sk_uid(sk));
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
						  ntohl(info), (u8 *)(uh + 1));
		goto out;
	}

	if (!inet6_test_bit(RECVERR6, sk)) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh + 1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}
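
/* Error delivery policy recap (as implemented above): without IPV6_RECVERR
 * the error is surfaced only on connected sockets and only for hard errors;
 * with it, the ICMPv6 payload is queued on the error queue via
 * ipv6_icmp_error() first. In both delivering cases sk_err is set and
 * sk_error_report() wakes the socket.
 */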

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		trace_udp_fail_queue_rcv_skb(rc, sk, skb);
		sk_skb_reason_drop(sk, skb, drop_reason);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info,
			      dev_net(skb->dev)->ipv4.udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
	    READ_ONCE(up->encap_type)) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
		u16 pcrlen = READ_ONCE(up->pcrlen);

		if (pcrlen == 0) { /* full coverage was set */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	udp_drops_inc(sk);
	sk_skb_reason_drop(sk, skb, drop_reason);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2].hslot;
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_get_no_check6_rx(sk))
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			udp_drops_inc(sk);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}
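
/* Delivery detail worth spelling out: the first matching socket is only
 * remembered, and receives the original skb after the loop, while every
 * later match gets an skb_clone(). This avoids any clone at all in the
 * common single-subscriber case.
 */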

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst))
		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct sock *sk = NULL;
	struct udphdr *uh;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
			      &refcounted, udp6_ehashfn);
	if (IS_ERR(sk))
		goto no_sk;

	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_get_no_check6_rx(sk)) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 * Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_get_no_check6_rx(sk))
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}
no_sk:
	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;
	nf_reset_ct(skb);

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	sk_skb_reason_drop(sk, skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	sk_skb_reason_drop(sk, skb, reason);
	return 0;
}

static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	struct udp_hslot *hslot2;
	unsigned int hash2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk)
		return;

	skb->sk = sk;
	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
	skb->destructor = sock_pfree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}
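
/* Early demux, in short: a connected socket found here is stashed in
 * skb->sk (released via sock_pfree) and its cached rx dst, when still
 * valid per sk_rx_dst_cookie, is attached noref. __udp6_lib_rcv() can
 * then steal the socket and skip both socket and route lookup for the
 * common connected case.
 */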

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		WRITE_ONCE(up->pending, 0);
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
}

static int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	int res;

	lock_sock(sk);
	res = __ip6_datagram_connect(sk, uaddr, addr_len);
	if (!res)
		udp6_hash4(sk);
	release_sock(sk);
	return res;
}
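
/* Note: sockets enter the 4-tuple hash here, on a successful connect()
 * (and are moved by udp_v6_rehash() on address changes), so only
 * connected sockets are ever found by udp6_lib_lookup4(); unconnected
 * sockets always take the slower scored lookups.
 */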

/**
 * udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 * @sk:	socket we are sending on
 * @skb: sk_buff containing the filled-in UDP header
 *	 (checksum field must be zeroed out)
 * @saddr: source address
 * @daddr: destination address
 * @len: length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
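
/* Checksum aside: in one's-complement arithmetic a computed sum of zero is
 * equally representable as 0xffff (CSUM_MANGLED_0). An on-wire UDP/IPv6
 * checksum of zero means "no checksum" and is invalid per RFC 2460, so the
 * substitution above keeps the packet valid without changing the sum.
 */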

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
			kfree_skb(skb);
			return -EMSGSIZE;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_get_no_check6_tx(sk)) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (is_udplite || dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);

			/* Don't checksum the payload, skb will get segmented */
			goto csum_partial;
		}
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_get_no_check6_tx(sk)) {	/* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}
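
/* UDP_SEGMENT constraints enforced above, collected in one place: the
 * header plus one segment must fit within the cork fragsize, at most
 * UDP_MAX_SEGMENTS segments may result, TX checksums cannot be disabled,
 * and neither UDP-Lite nor xfrm-transformed routes support GSO. A payload
 * no larger than gso_size is simply sent as an ordinary datagram.
 */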

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	WRITE_ONCE(up->pending, 0);
	return err;
}

int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init_sk(&ipc6, sk);
	ipc6.gso_size = READ_ONCE(up->gso_size);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!READ_ONCE(up->pending)) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;

			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (READ_ONCE(up->pending)) {
		if (READ_ONCE(up->pending) == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (inet6_test_bit(SNDFLOW, sk)) {
			fl6->flowlabel = sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk_uid(sk);

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0) {
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
			connected = false;
		}
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen | opt->opt_flen))
			opt = NULL;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &addr_len,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(np->ucast_oif);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   dst_rt6_info(dst),
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals dst reference */
		goto out_no_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	WRITE_ONCE(up->pending, AF_INET6);

do_append_data:
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, dst_rt6_info(dst),
			      corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		WRITE_ONCE(up->pending, 0);

	if (err > 0)
		err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udpv6_sendmsg);
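
/* Sendmsg paths, summarized: the uncorked case builds the whole datagram
 * with ip6_make_skb() and transmits it without holding the socket lock;
 * with UDP_CORK or MSG_MORE the data is appended under lock via
 * ip6_append_data() and only pushed out by a later uncorked send, by
 * clearing UDP_CORK, or by the splice_eof handler below.
 */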

static void udpv6_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
		return;

	lock_sock(sk);
	if (up->pending && !udp_test_bit(CORK, sk))
		udp_v6_push_pending_frames(sk);
	release_sock(sk);
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);

			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (udp_test_bit(ENCAP_ENABLED, sk)) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
			udp_tunnel_cleanup_gro(sk);
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		const struct inet_sock *inet = inet_sk((const struct sock *)v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);

		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= NULL,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= udpv6_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.splice_eof		= udpv6_splice_eof,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif

	.memory_allocated	= &net_aligned_data.udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.ipv6_pinfo_offset	= offsetof(struct udp6_sock, inet6),
	.h.udp_table		= NULL,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	net_hotdata.udpv6_protocol = (struct inet6_protocol) {
		.handler	= udpv6_rcv,
		.err_handler	= udpv6_err,
		.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
	};
	ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
}