// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Multicast support for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
 */

/* Changes:
 *
 *	yoshfuji	: fix format of router-alert option
 *	YOSHIFUJI Hideaki @USAGI:
 *		Fixed source address for MLD message based on
 *		<draft-ietf-magma-mld-source-05.txt>.
 *	YOSHIFUJI Hideaki @USAGI:
 *		- Ignore Queries for invalid addresses.
 *		- MLD for link-local addresses.
 *	David L Stevens <dlstevens@us.ibm.com>:
 *		- MLDv2 support
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#include <net/mld.h>
#include <linux/workqueue.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>

#include <net/ip6_checksum.h>

/* Ensure that we have struct in6_addr aligned on 32bit word. */
static int __mld2_query_bugs[] __attribute__((__unused__)) = {
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
};

static struct workqueue_struct *mld_wq;
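/* All MLDv2-capable routers (ff02::16); MLDv2 reports are sent there. */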
static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;

static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void mld_mca_work(struct work_struct *work);

static void mld_ifc_event(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev);
static int __ipv6_dev_mc_inc(struct net_device *dev,
			     const struct in6_addr *addr, unsigned int mode);

#define MLD_QRV_DEFAULT		2
/* RFC3810, 9.2. Query Interval */
#define MLD_QI_DEFAULT		(125 * HZ)
/* RFC3810, 9.3. Query Response Interval */
#define MLD_QRI_DEFAULT		(10 * HZ)

/* RFC3810, 8.1 Query Version Distinctions */
#define MLD_V1_QUERY_LEN	24
#define MLD_V2_QUERY_LEN_MIN	28

#define IPV6_MLD_MAX_MSF	64

int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;

#define mc_assert_locked(idev) \
	lockdep_assert_held(&(idev)->mc_lock)

#define mc_dereference(e, idev) \
	rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))

#define sock_dereference(e, sk) \
	rcu_dereference_protected(e, lockdep_sock_is_held(sk))

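/* Locking for the walkers below: *_rcu iterators may run under
 * rcu_read_lock() alone; *_socklock iterators need the socket lock;
 * the remaining (*_mclock, *_tomb) iterators need idev->mc_lock.
 */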
#define for_each_pmc_socklock(np, sk, pmc)			\
	for (pmc = sock_dereference((np)->ipv6_mc_list, sk);	\
	     pmc;						\
	     pmc = sock_dereference(pmc->next, sk))

#define for_each_pmc_rcu(np, pmc)				\
	for (pmc = rcu_dereference((np)->ipv6_mc_list);		\
	     pmc;						\
	     pmc = rcu_dereference(pmc->next))

#define for_each_psf_mclock(mc, psf)				\
	for (psf = mc_dereference((mc)->mca_sources, mc->idev);	\
	     psf;						\
	     psf = mc_dereference(psf->sf_next, mc->idev))

#define for_each_psf_rcu(mc, psf)				\
	for (psf = rcu_dereference((mc)->mca_sources);		\
	     psf;						\
	     psf = rcu_dereference(psf->sf_next))

#define for_each_psf_tomb(mc, psf)				\
	for (psf = mc_dereference((mc)->mca_tomb, mc->idev);	\
	     psf;						\
	     psf = mc_dereference(psf->sf_next, mc->idev))

#define for_each_mc_mclock(idev, mc)				\
	for (mc = mc_dereference((idev)->mc_list, idev);	\
	     mc;						\
	     mc = mc_dereference(mc->next, idev))

#define for_each_mc_rcu(idev, mc)				\
	for (mc = rcu_dereference((idev)->mc_list);		\
	     mc;						\
	     mc = rcu_dereference(mc->next))

#define for_each_mc_tomb(idev, mc)				\
	for (mc = mc_dereference((idev)->mc_tomb, idev);	\
	     mc;						\
	     mc = mc_dereference(mc->next, idev))

static int unsolicited_report_interval(struct inet6_dev *idev)
{
	int iv;

	if (mld_in_v1_mode(idev))
		iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
	else
		iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);

	return iv > 0 ? iv : 1;
}

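/* Resolve the device used to reach @group: by routing lookup when
 * @ifindex is zero, otherwise by index.  On success a reference is
 * held on the returned device and the caller must drop it with
 * dev_put().
 */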
static struct net_device *ip6_mc_find_dev(struct net *net,
					  const struct in6_addr *group,
					  int ifindex)
{
	struct net_device *dev = NULL;
	struct rt6_info *rt;

	if (ifindex == 0) {
		rcu_read_lock();
		rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
		if (rt) {
			dev = dst_dev_rcu(&rt->dst);
			dev_hold(dev);
			ip6_rt_put(rt);
		}
		rcu_read_unlock();
	} else {
		dev = dev_get_by_index(net, ifindex);
	}

	return dev;
}

/*
 *	socket join on multicast group
 */
static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
			       const struct in6_addr *addr, unsigned int mode)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct net *net = sock_net(sk);
	struct net_device *dev = NULL;
	int err;

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	for_each_pmc_socklock(np, sk, mc_lst) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr))
			return -EADDRINUSE;
	}

	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
	if (!mc_lst)
		return -ENOMEM;

	mc_lst->next = NULL;
	mc_lst->addr = *addr;

	dev = ip6_mc_find_dev(net, addr, ifindex);
	if (!dev) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return -ENODEV;
	}

	mc_lst->ifindex = dev->ifindex;
	mc_lst->sfmode = mode;
	RCU_INIT_POINTER(mc_lst->sflist, NULL);

	/* now add/increase the group membership on the device */
	err = __ipv6_dev_mc_inc(dev, addr, mode);

	dev_put(dev);

	if (err) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return err;
	}

	mc_lst->next = np->ipv6_mc_list;
	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);

	return 0;
}

int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ipv6_sock_mc_join);

int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
			  const struct in6_addr *addr, unsigned int mode)
{
	return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
}

/*
 *	socket leave on multicast group
 */
static void __ipv6_sock_mc_drop(struct sock *sk, struct ipv6_mc_socklist *mc_lst)
{
	struct net *net = sock_net(sk);
	struct net_device *dev;

	dev = dev_get_by_index(net, mc_lst->ifindex);
	if (dev) {
		struct inet6_dev *idev = in6_dev_get(dev);

		ip6_mc_leave_src(sk, mc_lst, idev);

		if (idev) {
			__ipv6_dev_mc_dec(idev, &mc_lst->addr);
			in6_dev_put(idev);
		}

		dev_put(dev);
	} else {
		ip6_mc_leave_src(sk, mc_lst, NULL);
	}

	atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
	kfree_rcu(mc_lst, rcu);
}

int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist __rcu **lnk;
	struct ipv6_mc_socklist *mc_lst;

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	for (lnk = &np->ipv6_mc_list;
	     (mc_lst = sock_dereference(*lnk, sk)) != NULL;
	     lnk = &mc_lst->next) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			*lnk = mc_lst->next;
			__ipv6_sock_mc_drop(sk, mc_lst);
			return 0;
		}
	}

	return -EADDRNOTAVAIL;
}
EXPORT_SYMBOL(ipv6_sock_mc_drop);

static struct inet6_dev *ip6_mc_find_idev(struct net *net,
					  const struct in6_addr *group,
					  int ifindex)
{
	struct net_device *dev;
	struct inet6_dev *idev;

	dev = ip6_mc_find_dev(net, group, ifindex);
	if (!dev)
		return NULL;

	idev = in6_dev_get(dev);
	dev_put(dev);

	return idev;
}

void __ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;

	while ((mc_lst = sock_dereference(np->ipv6_mc_list, sk)) != NULL) {
		np->ipv6_mc_list = mc_lst->next;
		__ipv6_sock_mc_drop(sk, mc_lst);
	}
}

void ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	if (!rcu_access_pointer(np->ipv6_mc_list))
		return;

	lock_sock(sk);
	__ipv6_sock_mc_close(sk);
	release_sock(sk);
}

int ip6_mc_source(int add, int omode, struct sock *sk,
		  struct group_source_req *pgsr)
{
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct in6_addr *source, *group;
	struct net *net = sock_net(sk);
	struct ipv6_mc_socklist *pmc;
	struct ip6_sf_socklist *psl;
	struct inet6_dev *idev;
	int leavegroup = 0;
	int i, j, rv;
	int err;

	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	idev = ip6_mc_find_idev(net, group, pgsr->gsr_interface);
	if (!idev)
		return -ENODEV;

	mutex_lock(&idev->mc_lock);

	if (idev->dead) {
		err = -ENODEV;
		goto done;
	}

	err = -EADDRNOTAVAIL;

	for_each_pmc_socklock(inet6, sk, pmc) {
		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (rcu_access_pointer(pmc->sflist)) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
		pmc->sfmode = omode;
	}

	psl = sock_dereference(pmc->sflist, sk);
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip6_mc_del_src(idev, group, omode, 1, source, 1);

		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip6_sf_socklist *newpsl;
		int count = IP6_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP6_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
				   &sk->sk_omem_alloc);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		kfree_rcu(psl, rcu);
		psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
		if (rv == 0)	/* source already in the filter */
			goto done;
	}
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = *source;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip6_mc_add_src(idev, group, omode, 1, source, 1);
done:
	mutex_unlock(&idev->mc_lock);
	in6_dev_put(idev);
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
	return err;
}

int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
		    struct sockaddr_storage *list)
{
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	int leavegroup = 0;
	int i, err;

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;
	if (gsf->gf_fmode != MCAST_INCLUDE &&
	    gsf->gf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	idev = ip6_mc_find_idev(net, group, gsf->gf_interface);
	if (!idev)
		return -ENODEV;

	mutex_lock(&idev->mc_lock);

	if (idev->dead) {
		err = -ENODEV;
		goto done;
	}

	err = 0;

	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_socklock(inet6, sk, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (gsf->gf_numsrc) {
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr,
						      gsf->gf_numsrc),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
		for (i = 0; i < newpsl->sl_count; ++i, ++list) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)list;
			newpsl->sl_addr[i] = psin6->sin6_addr;
		}

		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
				     newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, struct_size(newpsl, sl_addr,
							     newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
	}

	psl = sock_dereference(pmc->sflist, sk);
	if (psl) {
		ip6_mc_del_src(idev, group, pmc->sfmode,
			       psl->sl_count, psl->sl_addr, 0);
		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
			   &sk->sk_omem_alloc);
	} else {
		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
	}

	rcu_assign_pointer(pmc->sflist, newpsl);
	kfree_rcu(psl, rcu);
	pmc->sfmode = gsf->gf_fmode;
	err = 0;
done:
	mutex_unlock(&idev->mc_lock);
	in6_dev_put(idev);
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
	return err;
}

int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
		  sockptr_t optval, size_t ss_offset)
{
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct ip6_sf_socklist *psl;
	unsigned int count;
	int i, copycount;

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	for_each_pmc_socklock(inet6, sk, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(group, &pmc->addr))
			break;
	}
	if (!pmc)		/* must have a prior join */
		return -EADDRNOTAVAIL;

	gsf->gf_fmode = pmc->sfmode;
	psl = sock_dereference(pmc->sflist, sk);
	count = psl ? psl->sl_count : 0;

	copycount = min(count, gsf->gf_numsrc);
	gsf->gf_numsrc = count;
	for (i = 0; i < copycount; i++) {
		struct sockaddr_in6 *psin6;
		struct sockaddr_storage ss;

		psin6 = (struct sockaddr_in6 *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin6->sin6_family = AF_INET6;
		psin6->sin6_addr = psl->sl_addr[i];
		if (copy_to_sockptr_offset(optval, ss_offset, &ss, sizeof(ss)))
			return -EFAULT;
		ss_offset += sizeof(ss);
	}
	return 0;
}

bool inet6_mc_check(const struct sock *sk, const struct in6_addr *mc_addr,
		    const struct in6_addr *src_addr)
{
	const struct ipv6_pinfo *np = inet6_sk(sk);
	const struct ipv6_mc_socklist *mc;
	const struct ip6_sf_socklist *psl;
	bool rv = true;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc) {
		if (ipv6_addr_equal(&mc->addr, mc_addr))
			break;
	}
	if (!mc) {
		rcu_read_unlock();
		return inet6_test_bit(MC6_ALL, sk);
	}
	psl = rcu_dereference(mc->sflist);
	if (!psl) {
		rv = mc->sfmode == MCAST_EXCLUDE;
	} else {
		int i;

		for (i = 0; i < psl->sl_count; i++) {
			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
				break;
		}
		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
			rv = false;
		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
			rv = false;
	}
	rcu_read_unlock();

	return rv;
}

static void igmp6_group_added(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	mc_assert_locked(mc->idev);

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (!(mc->mca_flags&MAF_LOADED)) {
		mc->mca_flags |= MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_add(dev, buf);
	}

	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
		return;

	if (mld_in_v1_mode(mc->idev)) {
		igmp6_join_group(mc);
		return;
	}
	/* else v2 */

	/* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
	 * should not send filter-mode change record as the mode
	 * should be from IN() to IN(A).
	 */
	if (mc->mca_sfmode == MCAST_EXCLUDE)
		mc->mca_crcount = mc->idev->mc_qrv;

	mld_ifc_event(mc->idev);
}

static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	mc_assert_locked(mc->idev);

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (mc->mca_flags&MAF_LOADED) {
		mc->mca_flags &= ~MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_del(dev, buf);
	}

	if (mc->mca_flags & MAF_NOREPORT)
		return;

	if (!mc->idev->dead)
		igmp6_leave_group(mc);

	if (cancel_delayed_work(&mc->mca_work))
		refcount_dec(&mc->mca_refcnt);
}

/* deleted ifmcaddr6 manipulation */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc;

	mc_assert_locked(idev);

	/* this is an "ifmcaddr6" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
	if (!pmc)
		return;

	pmc->idev = im->idev;
	in6_dev_hold(idev);
	pmc->mca_addr = im->mca_addr;
	pmc->mca_crcount = idev->mc_qrv;
	pmc->mca_sfmode = im->mca_sfmode;
	if (pmc->mca_sfmode == MCAST_INCLUDE) {
		struct ip6_sf_list *psf;

		rcu_assign_pointer(pmc->mca_tomb,
				   mc_dereference(im->mca_tomb, idev));
		rcu_assign_pointer(pmc->mca_sources,
				   mc_dereference(im->mca_sources, idev));
		RCU_INIT_POINTER(im->mca_tomb, NULL);
		RCU_INIT_POINTER(im->mca_sources, NULL);

		for_each_psf_mclock(pmc, psf)
			psf->sf_crcount = pmc->mca_crcount;
	}

	rcu_assign_pointer(pmc->next, idev->mc_tomb);
	rcu_assign_pointer(idev->mc_tomb, pmc);
}

static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ip6_sf_list *psf, *sources, *tomb;
	struct in6_addr *pmca = &im->mca_addr;
	struct ifmcaddr6 *pmc, *pmc_prev;

	mc_assert_locked(idev);

	pmc_prev = NULL;
	for_each_mc_tomb(idev, pmc) {
		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
			break;
		pmc_prev = pmc;
	}
	if (!pmc)
		return;
	if (pmc_prev)
		rcu_assign_pointer(pmc_prev->next, pmc->next);
	else
		rcu_assign_pointer(idev->mc_tomb, pmc->next);

	im->idev = pmc->idev;
	if (im->mca_sfmode == MCAST_INCLUDE) {
		tomb = rcu_replace_pointer(im->mca_tomb,
					   mc_dereference(pmc->mca_tomb, pmc->idev),
					   lockdep_is_held(&im->idev->mc_lock));
		rcu_assign_pointer(pmc->mca_tomb, tomb);

		sources = rcu_replace_pointer(im->mca_sources,
					      mc_dereference(pmc->mca_sources, pmc->idev),
					      lockdep_is_held(&im->idev->mc_lock));
		rcu_assign_pointer(pmc->mca_sources, sources);
		for_each_psf_mclock(im, psf)
			psf->sf_crcount = idev->mc_qrv;
	} else {
		im->mca_crcount = idev->mc_qrv;
	}
	ip6_mc_clear_src(pmc);
	in6_dev_put(pmc->idev);
	kfree_rcu(pmc, rcu);
}

static void mld_clear_delrec(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *nextpmc;

	mc_assert_locked(idev);

	pmc = mc_dereference(idev->mc_tomb, idev);
	RCU_INIT_POINTER(idev->mc_tomb, NULL);

	for (; pmc; pmc = nextpmc) {
		nextpmc = mc_dereference(pmc->next, idev);
		ip6_mc_clear_src(pmc);
		in6_dev_put(pmc->idev);
		kfree_rcu(pmc, rcu);
	}

	/* clear dead sources, too */
	for_each_mc_mclock(idev, pmc) {
		struct ip6_sf_list *psf, *psf_next;

		psf = mc_dereference(pmc->mca_tomb, idev);
		RCU_INIT_POINTER(pmc->mca_tomb, NULL);
		for (; psf; psf = psf_next) {
			psf_next = mc_dereference(psf->sf_next, idev);
			kfree_rcu(psf, rcu);
		}
	}
}

static void mld_clear_query(struct inet6_dev *idev)
{
	spin_lock_bh(&idev->mc_query_lock);
	__skb_queue_purge(&idev->mc_query_queue);
	spin_unlock_bh(&idev->mc_query_lock);
}

static void mld_clear_report(struct inet6_dev *idev)
{
	spin_lock_bh(&idev->mc_report_lock);
	__skb_queue_purge(&idev->mc_report_queue);
	spin_unlock_bh(&idev->mc_report_lock);
}

static void ma_put(struct ifmcaddr6 *mc)
{
	if (refcount_dec_and_test(&mc->mca_refcnt)) {
		in6_dev_put(mc->idev);
		kfree_rcu(mc, rcu);
	}
}

static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
				   const struct in6_addr *addr,
				   unsigned int mode)
{
	struct ifmcaddr6 *mc;

	mc_assert_locked(idev);

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	INIT_DELAYED_WORK(&mc->mca_work, mld_mca_work);

	mc->mca_addr = *addr;
	mc->idev = idev; /* reference taken by caller */
	mc->mca_users = 1;
	/* mca_stamp should be updated upon changes */
	mc->mca_cstamp = mc->mca_tstamp = jiffies;
	refcount_set(&mc->mca_refcnt, 1);

	mc->mca_sfmode = mode;
	mc->mca_sfcount[mode] = 1;

	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		mc->mca_flags |= MAF_NOREPORT;

	return mc;
}

static void inet6_ifmcaddr_notify(struct net_device *dev,
				  const struct ifmcaddr6 *ifmca, int event)
{
	struct inet6_fill_args fillargs = {
		.portid = 0,
		.seq = 0,
		.event = event,
		.flags = 0,
		.netnsid = -1,
		.force_rt_scope_universe = true,
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOMEM;

	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
			nla_total_size(sizeof(struct in6_addr)) +
			nla_total_size(sizeof(struct ifa_cacheinfo)),
			GFP_KERNEL);
	if (!skb)
		goto error;

	err = inet6_fill_ifmcaddr(skb, ifmca, &fillargs);
	if (err < 0) {
		WARN_ON_ONCE(err == -EMSGSIZE);
		nlmsg_free(skb);
		goto error;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MCADDR, NULL, GFP_KERNEL);
	return;
error:
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MCADDR, err);
}

/*
 *	device multicast group inc (add if not found)
 */
static int __ipv6_dev_mc_inc(struct net_device *dev,
			     const struct in6_addr *addr, unsigned int mode)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;

	/* we need to take a reference on idev */
	idev = in6_dev_get(dev);
	if (!idev)
		return -EINVAL;

	mutex_lock(&idev->mc_lock);

	if (READ_ONCE(idev->dead)) {
		mutex_unlock(&idev->mc_lock);
		in6_dev_put(idev);
		return -ENODEV;
	}

	for_each_mc_mclock(idev, mc) {
		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
			mc->mca_users++;
			ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
			mutex_unlock(&idev->mc_lock);
			in6_dev_put(idev);
			return 0;
		}
	}

	mc = mca_alloc(idev, addr, mode);
	if (!mc) {
		mutex_unlock(&idev->mc_lock);
		in6_dev_put(idev);
		return -ENOMEM;
	}

	rcu_assign_pointer(mc->next, idev->mc_list);
	rcu_assign_pointer(idev->mc_list, mc);

	mld_del_delrec(idev, mc);
	igmp6_group_added(mc);
	inet6_ifmcaddr_notify(dev, mc, RTM_NEWMULTICAST);
	mutex_unlock(&idev->mc_lock);

	return 0;
}

int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
	return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ipv6_dev_mc_inc);

/*
 *	device multicast group del
 */
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *ma, __rcu **map;

	mutex_lock(&idev->mc_lock);

	for (map = &idev->mc_list;
	     (ma = mc_dereference(*map, idev));
	     map = &ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
			if (--ma->mca_users == 0) {
				*map = ma->next;

				igmp6_group_dropped(ma);
				inet6_ifmcaddr_notify(idev->dev, ma,
						      RTM_DELMULTICAST);
				ip6_mc_clear_src(ma);
				mutex_unlock(&idev->mc_lock);

				ma_put(ma);
				return 0;
			}
			mutex_unlock(&idev->mc_lock);
			return 0;
		}
	}

	mutex_unlock(&idev->mc_lock);
	return -ENOENT;
}

int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
{
	struct inet6_dev *idev;
	int err;

	idev = in6_dev_get(dev);
	if (!idev)
		return -ENODEV;

	err = __ipv6_dev_mc_dec(idev, addr);
	in6_dev_put(idev);

	return err;
}
EXPORT_SYMBOL(ipv6_dev_mc_dec);

/*
 *	check if the interface/address pair is valid
 */
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
			 const struct in6_addr *src_addr)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;
	bool rv = false;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (!idev)
		goto unlock;
	for_each_mc_rcu(idev, mc) {
		if (ipv6_addr_equal(&mc->mca_addr, group))
			break;
	}
	if (!mc)
		goto unlock;
	if (src_addr && !ipv6_addr_any(src_addr)) {
		struct ip6_sf_list *psf;

		for_each_psf_rcu(mc, psf) {
			if (ipv6_addr_equal(&psf->sf_addr, src_addr))
				break;
		}
		if (psf)
			rv = READ_ONCE(psf->sf_count[MCAST_INCLUDE]) ||
			     READ_ONCE(psf->sf_count[MCAST_EXCLUDE]) !=
			     READ_ONCE(mc->mca_sfcount[MCAST_EXCLUDE]);
		else
			rv = READ_ONCE(mc->mca_sfcount[MCAST_EXCLUDE]) != 0;
	} else {
		rv = true; /* don't filter unspecified source */
	}
unlock:
	rcu_read_unlock();
	return rv;
}

static void mld_gq_start_work(struct inet6_dev *idev)
{
	unsigned long tv = get_random_u32_below(idev->mc_maxdelay);

	mc_assert_locked(idev);

	idev->mc_gq_running = 1;
	if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
		in6_dev_hold(idev);
}

static void mld_gq_stop_work(struct inet6_dev *idev)
{
	mc_assert_locked(idev);

	idev->mc_gq_running = 0;
	if (cancel_delayed_work(&idev->mc_gq_work))
		__in6_dev_put(idev);
}

static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
{
	unsigned long tv = get_random_u32_below(delay);

	mc_assert_locked(idev);

	if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
		in6_dev_hold(idev);
}

static void mld_ifc_stop_work(struct inet6_dev *idev)
{
	mc_assert_locked(idev);

	idev->mc_ifc_count = 0;
	if (cancel_delayed_work(&idev->mc_ifc_work))
		__in6_dev_put(idev);
}

static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
{
	unsigned long tv = get_random_u32_below(delay);

	mc_assert_locked(idev);

	if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
		in6_dev_hold(idev);
}

static void mld_dad_stop_work(struct inet6_dev *idev)
{
	if (cancel_delayed_work(&idev->mc_dad_work))
		__in6_dev_put(idev);
}

static void mld_query_stop_work(struct inet6_dev *idev)
{
	spin_lock_bh(&idev->mc_query_lock);
	if (cancel_delayed_work(&idev->mc_query_work))
		__in6_dev_put(idev);
	spin_unlock_bh(&idev->mc_query_lock);
}

static void mld_report_stop_work(struct inet6_dev *idev)
{
	if (cancel_delayed_work_sync(&idev->mc_report_work))
		__in6_dev_put(idev);
}

/* IGMP handling (alias multicast ICMPv6 messages) */
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	mc_assert_locked(ma->idev);

	/* Do not start work for these addresses */
	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (cancel_delayed_work(&ma->mca_work)) {
		refcount_dec(&ma->mca_refcnt);
		delay = ma->mca_work.timer.expires - jiffies;
	}

	if (delay >= resptime)
		delay = get_random_u32_below(resptime);

	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING;
}

/* mark EXCLUDE-mode sources */
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
			     const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	mc_assert_locked(pmc->idev);

	scount = 0;
	for_each_psf_mclock(pmc, psf) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				scount++;
				break;
			}
		}
	}
	pmc->mca_flags &= ~MAF_GSQUERY;
	if (scount == nsrcs)	/* all sources excluded */
		return false;
	return true;
}

static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
			    const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	mc_assert_locked(pmc->idev);

	if (pmc->mca_sfmode == MCAST_EXCLUDE)
		return mld_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */

	scount = 0;
	for_each_psf_mclock(pmc, psf) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
		}
	}
	if (!scount) {
		pmc->mca_flags &= ~MAF_GSQUERY;
		return false;
	}
	pmc->mca_flags |= MAF_GSQUERY;
	return true;
}

static int mld_force_mld_version(const struct inet6_dev *idev)
{
	const struct net *net = dev_net(idev->dev);
	int all_force;

	all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
	/* Normally, both are 0 here. If enforcement of a particular version
	 * is in use, the per-device setting has lower precedence than the
	 * 'all' device setting (.../conf/all/force_mld_version).
	 */
	return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
}

static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
{
	return mld_force_mld_version(idev) == 2;
}

static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
{
	return mld_force_mld_version(idev) == 1;
}

static bool mld_in_v1_mode(const struct inet6_dev *idev)
{
	if (mld_in_v2_mode_only(idev))
		return false;
	if (mld_in_v1_mode_only(idev))
		return true;
	if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
		return true;

	return false;
}

static void mld_set_v1_mode(struct inet6_dev *idev)
{
	/* RFC3810, relevant sections:
	 *  - 9.1. Robustness Variable
	 *  - 9.2. Query Interval
	 *  - 9.3. Query Response Interval
	 *  - 9.12. Older Version Querier Present Timeout
	 */
	unsigned long switchback;

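	/* Older Version Querier Present Timeout (RFC3810, 9.12):
	 *   [Robustness Variable] * [Query Interval] + [Query Response Interval]
	 * With the defaults above this is 2 * 125s + 10s = 260s.
	 */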
	switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;

	idev->mc_v1_seen = jiffies + switchback;
}

static void mld_update_qrv(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.8. QRV (Querier's Robustness Variable)
	 *  - 9.1. Robustness Variable
	 */

	/* The value of the Robustness Variable MUST NOT be zero,
	 * and SHOULD NOT be one. Catch this here if we ever run
	 * into such a case in future.
	 */
	const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);

	WARN_ON(idev->mc_qrv == 0);

	if (mlh2->mld2q_qrv > 0)
		idev->mc_qrv = mlh2->mld2q_qrv;

	if (unlikely(idev->mc_qrv < min_qrv)) {
		net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
				     idev->mc_qrv, min_qrv);
		idev->mc_qrv = min_qrv;
	}
}

static void mld_update_qi(struct inet6_dev *idev,
			  const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.9. QQIC (Querier's Query Interval Code)
	 *  - 9.2. Query Interval
	 *  - 9.12. Older Version Querier Present Timeout
	 *    (the [Query Interval] in the last Query received)
	 */
	unsigned long mc_qqi;

	if (mlh2->mld2q_qqic < 128) {
		mc_qqi = mlh2->mld2q_qqic;
	} else {
		unsigned long mc_man, mc_exp;

		mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
		mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);

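		/* QQIC >= 128 encodes a floating point value (RFC3810,
		 * 5.1.9): QQI = (mantissa | 0x10) << (exponent + 3).
		 * e.g. QQIC 0x9a -> exponent 1, mantissa 10, so
		 * QQI = (10 | 16) << 4 = 416 seconds.
		 */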
		mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
	}

	idev->mc_qi = mc_qqi * HZ;
}

static void mld_update_qri(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.3. Maximum Response Code
	 *  - 9.3. Query Response Interval
	 */
	idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
}

static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
			  unsigned long *max_delay, bool v1_query)
{
	unsigned long mldv1_md;

	/* Ignore v1 queries */
	if (mld_in_v2_mode_only(idev))
		return -EINVAL;

	mldv1_md = ntohs(mld->mld_maxdelay);

	/* When in MLDv1 fallback and a MLDv2 router start-up being
	 * unaware of current MLDv1 operation, the MRC == MRD mapping
	 * only works when the exponential algorithm is not being
	 * used (as MLDv1 is unaware of such things).
	 *
	 * According to the RFC author, the MLDv2 implementations
	 * he's aware of all use a MRC < 32768 on start up queries.
	 *
	 * Thus, should we *ever* encounter something else larger
	 * than that, just assume the maximum possible within our
	 * reach.
	 */
	if (!v1_query)
		mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);

	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);

	/* MLDv1 router present: we need to go into v1 mode *only*
	 * when an MLDv1 query is received as per section 9.12. of
	 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
	 * queries MUST be of exactly 24 octets.
	 */
	if (v1_query)
		mld_set_v1_mode(idev);

	/* cancel MLDv2 report work */
	mld_gq_stop_work(idev);
	/* cancel the interface change work */
	mld_ifc_stop_work(idev);
	/* clear deleted report items */
	mld_clear_delrec(idev);

	return 0;
}

static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
			   unsigned long *max_delay)
{
	*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);

	mld_update_qrv(idev, mld);
	mld_update_qi(idev, mld);
	mld_update_qri(idev, mld);

	idev->mc_maxdelay = *max_delay;
}

/* called with rcu_read_lock() */
void igmp6_event_query(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);

	if (!idev || idev->dead)
		goto out;

	spin_lock_bh(&idev->mc_query_lock);
	if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
		__skb_queue_tail(&idev->mc_query_queue, skb);
		if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
			in6_dev_hold(idev);
		skb = NULL;
	}
	spin_unlock_bh(&idev->mc_query_lock);
out:
	kfree_skb(skb);
}

static void __mld_query_work(struct sk_buff *skb)
{
	struct mld2_query *mlh2 = NULL;
	const struct in6_addr *group;
	unsigned long max_delay;
	struct inet6_dev *idev;
	struct ifmcaddr6 *ma;
	struct mld_msg *mld;
	int group_type;
	int mark = 0;
	int len, err;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		goto kfree_skb;

	/* compute payload length excluding extension headers */
	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
	len -= skb_network_header_len(skb);

	/* RFC3810 6.2
	 * Upon reception of an MLD message that contains a Query, the node
	 * checks if the source address of the message is a valid link-local
	 * address, if the Hop Limit is set to 1, and if the Router Alert
	 * option is present in the Hop-By-Hop Options header of the IPv6
	 * packet.  If any of these checks fails, the packet is dropped.
	 */
	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
	    ipv6_hdr(skb)->hop_limit != 1 ||
	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
		goto kfree_skb;

	idev = in6_dev_get(skb->dev);
	if (!idev)
		goto kfree_skb;

	mld = (struct mld_msg *)icmp6_hdr(skb);
	group = &mld->mld_mca;
	group_type = ipv6_addr_type(group);

	if (group_type != IPV6_ADDR_ANY &&
	    !(group_type&IPV6_ADDR_MULTICAST))
		goto out;

	if (len < MLD_V1_QUERY_LEN) {
		goto out;
	} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
		err = mld_process_v1(idev, mld, &max_delay,
				     len == MLD_V1_QUERY_LEN);
		if (err < 0)
			goto out;
	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
		int srcs_offset = sizeof(struct mld2_query) -
				  sizeof(struct icmp6hdr);

		if (!pskb_may_pull(skb, srcs_offset))
			goto out;

		mlh2 = (struct mld2_query *)skb_transport_header(skb);

		mld_process_v2(idev, mlh2, &max_delay);

		if (group_type == IPV6_ADDR_ANY) { /* general query */
			if (mlh2->mld2q_nsrcs)
				goto out; /* no sources allowed */

			mld_gq_start_work(idev);
			goto out;
		}
		/* mark sources to include, if group & source-specific */
		if (mlh2->mld2q_nsrcs != 0) {
			if (!pskb_may_pull(skb, srcs_offset +
			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
				goto out;

			mlh2 = (struct mld2_query *)skb_transport_header(skb);
			mark = 1;
		}
	} else {
		goto out;
	}

	if (group_type == IPV6_ADDR_ANY) {
		for_each_mc_mclock(idev, ma) {
			igmp6_group_queried(ma, max_delay);
		}
	} else {
		for_each_mc_mclock(idev, ma) {
			if (!ipv6_addr_equal(group, &ma->mca_addr))
				continue;
			if (ma->mca_flags & MAF_TIMER_RUNNING) {
				/* gsquery <- gsquery && mark */
				if (!mark)
					ma->mca_flags &= ~MAF_GSQUERY;
			} else {
				/* gsquery <- mark */
				if (mark)
					ma->mca_flags |= MAF_GSQUERY;
				else
					ma->mca_flags &= ~MAF_GSQUERY;
			}
			if (!(ma->mca_flags & MAF_GSQUERY) ||
			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
				igmp6_group_queried(ma, max_delay);
			break;
		}
	}

out:
	in6_dev_put(idev);
kfree_skb:
	consume_skb(skb);
}

static void mld_query_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_query_work);
	struct sk_buff_head q;
	struct sk_buff *skb;
	bool rework = false;
	int cnt = 0;

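	/* Move at most MLD_MAX_QUEUE skbs to a private queue so the time
	 * spent under mc_lock stays bounded; any leftover packets are
	 * handled by requeueing this work item below.
	 */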
	skb_queue_head_init(&q);

	spin_lock_bh(&idev->mc_query_lock);
	while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
		__skb_queue_tail(&q, skb);

		if (++cnt >= MLD_MAX_QUEUE) {
			rework = true;
			break;
		}
	}
	spin_unlock_bh(&idev->mc_query_lock);

	mutex_lock(&idev->mc_lock);
	while ((skb = __skb_dequeue(&q)))
		__mld_query_work(skb);
	mutex_unlock(&idev->mc_lock);

	if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
		return;

	in6_dev_put(idev);
}

/* called with rcu_read_lock() */
void igmp6_event_report(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);

	if (!idev || idev->dead)
		goto out;

	spin_lock_bh(&idev->mc_report_lock);
	if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
		__skb_queue_tail(&idev->mc_report_queue, skb);
		if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
			in6_dev_hold(idev);
		skb = NULL;
	}
	spin_unlock_bh(&idev->mc_report_lock);
out:
	kfree_skb(skb);
}

static void __mld_report_work(struct sk_buff *skb)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *ma;
	struct mld_msg *mld;
	int addr_type;

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		goto kfree_skb;

	/* send our report if the MC router may not have heard this report */
	if (skb->pkt_type != PACKET_MULTICAST &&
	    skb->pkt_type != PACKET_BROADCAST)
		goto kfree_skb;

	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
		goto kfree_skb;

	mld = (struct mld_msg *)icmp6_hdr(skb);

	/* Drop reports with a non-link-local source */
	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
	if (addr_type != IPV6_ADDR_ANY &&
	    !(addr_type&IPV6_ADDR_LINKLOCAL))
		goto kfree_skb;

	idev = in6_dev_get(skb->dev);
	if (!idev)
		goto kfree_skb;

	/*
	 *	Cancel the work for this group
	 */

	for_each_mc_mclock(idev, ma) {
		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
			if (cancel_delayed_work(&ma->mca_work))
				refcount_dec(&ma->mca_refcnt);
			ma->mca_flags &= ~(MAF_LAST_REPORTER |
					   MAF_TIMER_RUNNING);
			break;
		}
	}

	in6_dev_put(idev);
kfree_skb:
	consume_skb(skb);
}

static void mld_report_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_report_work);
	struct sk_buff_head q;
	struct sk_buff *skb;
	bool rework = false;
	int cnt = 0;

	skb_queue_head_init(&q);
	spin_lock_bh(&idev->mc_report_lock);
	while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
		__skb_queue_tail(&q, skb);

		if (++cnt >= MLD_MAX_QUEUE) {
			rework = true;
			break;
		}
	}
	spin_unlock_bh(&idev->mc_report_lock);

	mutex_lock(&idev->mc_lock);
	while ((skb = __skb_dequeue(&q)))
		__mld_report_work(skb);
	mutex_unlock(&idev->mc_lock);

	if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
		return;

	in6_dev_put(idev);
}

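/* Should source @psf be listed in a record of type @type?
 * @gdeleted: the group is on the idev->mc_tomb list (deleted MCA);
 * @sdeleted: the source is on the pmc->mca_tomb list (deleted source).
 */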
static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
		  int gdeleted, int sdeleted)
{
	switch (type) {
	case MLD2_MODE_IS_INCLUDE:
	case MLD2_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
			if (pmc->mca_sfmode == MCAST_INCLUDE)
				return true;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == MLD2_MODE_IS_INCLUDE;
			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return false;
	case MLD2_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return false;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case MLD2_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return false;
		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case MLD2_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return false;
		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
	case MLD2_BLOCK_OLD_SOURCES:
		if (pmc->mca_sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return false;
}

static int
mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
{
	struct ip6_sf_list *psf;
	int scount = 0;

	for_each_psf_mclock(pmc, psf) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
			continue;
		scount++;
	}
	return scount;
}

static void ip6_mc_hdr(const struct sock *sk, struct sk_buff *skb,
		       struct net_device *dev, const struct in6_addr *saddr,
		       const struct in6_addr *daddr, int proto, int len)
{
	struct ipv6hdr *hdr;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, 0, 0);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = READ_ONCE(inet6_sk(sk)->hop_limit);

	hdr->saddr = *saddr;
	hdr->daddr = *daddr;
}

static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
{
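	/* Hop-by-hop header: next header ICMPv6, length 0 (8 octets),
	 * carrying a Router Alert option (type 5, data length 2, value 0
	 * for MLD) plus a PadN option filling the remaining two octets.
	 */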
	u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
		     2, 0, 0, IPV6_TLV_PADN, 0 };
	struct net_device *dev = idev->dev;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	const struct in6_addr *saddr;
	struct in6_addr addr_buf;
	struct mld2_report *pmr;
	struct sk_buff *skb;
	unsigned int size;
	struct sock *sk;
	struct net *net;

	/* we assume size > sizeof(ra) here
	 * Also try to not allocate high-order pages for big MTU
	 */
	size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);
	skb_tailroom_reserve(skb, mtu, tlen);

	rcu_read_lock();

	net = dev_net_rcu(dev);
	sk = net->ipv6.igmp_sk;
	skb_set_owner_w(skb, sk);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

	rcu_read_unlock();

	skb_put_data(skb, ra, sizeof(ra));

	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
	skb_put(skb, sizeof(*pmr));
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
	pmr->mld2r_resv1 = 0;
	pmr->mld2r_cksum = 0;
	pmr->mld2r_resv2 = 0;
	pmr->mld2r_ngrec = 0;
	return skb;
}

static void mld_sendpack(struct sk_buff *skb)
{
	struct ipv6hdr *pip6 = ipv6_hdr(skb);
	struct mld2_report *pmr =
			      (struct mld2_report *)skb_transport_header(skb);
	int payload_len, mldlen;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dev);
	int err;
	struct flowi6 fl6;
	struct dst_entry *dst;

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);
	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);

	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
		      sizeof(*pip6);
	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	pip6->payload_len = htons(payload_len);

	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
					   IPPROTO_ICMPV6,
					   csum_partial(skb_transport_header(skb),
							mldlen, 0));

	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);

	err = 0;
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	if (err)
		goto err_out;

	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	} else {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
	}

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}

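/* Worst-case size of one group record: the fixed record header plus
 * 16 bytes (one IPv6 address) per source that would be reported.
 */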
1874static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1875{
1876 return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdeleted: gdel,sdeleted: sdel);
1877}
1878
1879static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1880 int type, struct mld2_grec **ppgr, unsigned int mtu)
1881{
1882 struct mld2_report *pmr;
1883 struct mld2_grec *pgr;
1884
1885 if (!skb) {
1886 skb = mld_newpack(idev: pmc->idev, mtu);
1887 if (!skb)
1888 return NULL;
1889 }
1890 pgr = skb_put(skb, len: sizeof(struct mld2_grec));
1891 pgr->grec_type = type;
1892 pgr->grec_auxwords = 0;
1893 pgr->grec_nsrcs = 0;
1894 pgr->grec_mca = pmc->mca_addr; /* structure copy */
1895 pmr = (struct mld2_report *)skb_transport_header(skb);
1896 pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1897 *ppgr = pgr;
1898 return skb;
1899}
1900
1901#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
1902
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
				int type, int gdeleted, int sdeleted,
				int crsend)
{
	struct ip6_sf_list *psf, *psf_prev, *psf_next;
	int scount, stotal, first, isquery, truncate;
	struct ip6_sf_list __rcu **psf_list;
	struct inet6_dev *idev = pmc->idev;
	struct net_device *dev = idev->dev;
	struct mld2_grec *pgr = NULL;
	struct mld2_report *pmr;
	unsigned int mtu;

	mc_assert_locked(idev);

	if (pmc->mca_flags & MAF_NOREPORT)
		return skb;

	mtu = READ_ONCE(dev->mtu);
	if (mtu < IPV6_MIN_MTU)
		return skb;

	isquery = type == MLD2_MODE_IS_INCLUDE ||
		  type == MLD2_MODE_IS_EXCLUDE;
	truncate = type == MLD2_MODE_IS_EXCLUDE ||
		   type == MLD2_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;

	if (!rcu_access_pointer(*psf_list))
		goto empty_source;

	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pmr && pmr->mld2r_ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf = mc_dereference(*psf_list, idev);
	     psf;
	     psf = psf_next) {
		struct in6_addr *psrc;

		psf_next = mc_dereference(psf->sf_next, idev);

		if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
			psf_prev = psf;
			continue;
		}

		/* Based on RFC3810 6.1. Should not send source-list change
		 * records when there is a filter mode change.
		 */
		if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
		     (!gdeleted && pmc->mca_crcount)) &&
		    (type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
			goto decrease_sf_crcount;

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(*psrc) +
		    first*sizeof(struct mld2_grec)) {
			if (truncate && !first)
				break;	/* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = skb_put(skb, sizeof(*psrc));
		*psrc = psf->sf_addr;
		scount++; stotal++;
		if ((type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
			psf->sf_crcount--;
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					rcu_assign_pointer(psf_prev->sf_next,
							   mc_dereference(psf->sf_next, idev));
				else
					rcu_assign_pointer(*psf_list,
							   mc_dereference(psf->sf_next, idev));
				kfree_rcu(psf, rcu);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		if (type == MLD2_ALLOW_NEW_SOURCES ||
		    type == MLD2_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->mca_crcount || isquery || crsend) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
				mld_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
	return skb;
}

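/* Send MLDv2 current-state records: for a single group when @pmc is set,
 * otherwise for every reportable group on the interface (response to a
 * general query).
 */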
static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
{
	struct sk_buff *skb = NULL;
	int type;

	mc_assert_locked(idev);

	if (!pmc) {
		for_each_mc_mclock(idev, pmc) {
			if (pmc->mca_flags & MAF_NOREPORT)
				continue;
			if (pmc->mca_sfcount[MCAST_EXCLUDE])
				type = MLD2_MODE_IS_EXCLUDE;
			else
				type = MLD2_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0, 0);
		}
	} else {
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_MODE_IS_EXCLUDE;
		else
			type = MLD2_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0, 0);
	}
	if (skb)
		mld_sendpack(skb);
}

/* remove zero-count source records from a source filter list */
static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
{
	struct ip6_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf = mc_dereference(*ppsf, idev);
	     psf;
	     psf = psf_next) {
		psf_next = mc_dereference(psf->sf_next, idev);
		if (psf->sf_crcount == 0) {
			if (psf_prev)
				rcu_assign_pointer(psf_prev->sf_next,
						   mc_dereference(psf->sf_next, idev));
			else
				rcu_assign_pointer(*ppsf,
						   mc_dereference(psf->sf_next, idev));
			kfree_rcu(psf, rcu);
		} else {
			psf_prev = psf;
		}
	}
}

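/* Send the pending state-change records: source-list and filter-mode
 * changes for live groups, plus TO_IN/BLOCK records for groups parked on
 * the tomb list.  Fully retransmitted tomb entries are freed here.
 */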
static void mld_send_cr(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc = mc_dereference(idev->mc_tomb, idev);
	     pmc;
	     pmc = pmc_next) {
		pmc_next = mc_dereference(pmc->next, idev);
		if (pmc->mca_sfmode == MCAST_INCLUDE) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1, 0);
		}
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
				type = MLD2_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0, 0);
			}
			pmc->mca_crcount--;
			if (pmc->mca_crcount == 0) {
				mld_clear_zeros(&pmc->mca_tomb, idev);
				mld_clear_zeros(&pmc->mca_sources, idev);
			}
		}
		if (pmc->mca_crcount == 0 &&
		    !rcu_access_pointer(pmc->mca_tomb) &&
		    !rcu_access_pointer(pmc->mca_sources)) {
			if (pmc_prev)
				rcu_assign_pointer(pmc_prev->next, pmc_next);
			else
				rcu_assign_pointer(idev->mc_tomb, pmc_next);
			in6_dev_put(pmc->idev);
			kfree_rcu(pmc, rcu);
		} else
			pmc_prev = pmc;
	}

	/* change recs */
	for_each_mc_mclock(idev, pmc) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			type = MLD2_BLOCK_OLD_SOURCES;
			dtype = MLD2_ALLOW_NEW_SOURCES;
		} else {
			type = MLD2_ALLOW_NEW_SOURCES;
			dtype = MLD2_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */

		/* filter mode changes */
		if (pmc->mca_crcount) {
			if (pmc->mca_sfmode == MCAST_EXCLUDE)
				type = MLD2_CHANGE_TO_EXCLUDE;
			else
				type = MLD2_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0, 0);
			pmc->mca_crcount--;
		}
	}
	if (!skb)
		return;
	(void) mld_sendpack(skb);
}

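/* Build and send an MLDv1 message (Report or Done) for @addr on @dev,
 * including the hop-by-hop router-alert option required by RFC 2710.
 * Done messages go to the all-routers address, Reports to the group
 * itself.
 */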
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
	const struct in6_addr *snd_addr, *saddr;
	int err, len, payload_len, full_len;
	struct in6_addr addr_buf;
	struct inet6_dev *idev;
	struct sk_buff *skb;
	struct mld_msg *hdr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };
	struct dst_entry *dst;
	struct flowi6 fl6;
	struct net *net;
	struct sock *sk;

	if (type == ICMPV6_MGM_REDUCTION)
		snd_addr = &in6addr_linklocal_allrouters;
	else
		snd_addr = addr;

	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
	payload_len = len + sizeof(ra);
	full_len = sizeof(struct ipv6hdr) + payload_len;

	skb = alloc_skb(hlen + tlen + full_len, GFP_KERNEL);

	rcu_read_lock();

	net = dev_net_rcu(dev);
	idev = __in6_dev_get(dev);
	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
	if (!skb) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		return;
	}
	sk = net->ipv6.igmp_sk;
	skb_set_owner_w(skb, sk);

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);

	skb_put_data(skb, ra, sizeof(ra));

	hdr = skb_put_zero(skb, sizeof(struct mld_msg));
	hdr->mld_type = type;
	hdr->mld_mca = *addr;

	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
					 IPPROTO_ICMPV6,
					 csum_partial(hdr, len, 0));

	icmpv6_flow_init(sk, &fl6, type,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto err_out;
	}

	skb_dst_set(skb, dst);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		      net, sk, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	} else
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}

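/* Send the first unsolicited state-change report for every group on the
 * interface, announcing the current filter state.  Nothing to do in MLDv1
 * mode, which uses plain Reports instead.
 */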
static void mld_send_initial_cr(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc;
	struct sk_buff *skb;
	int type;

	mc_assert_locked(idev);

	if (mld_in_v1_mode(idev))
		return;

	skb = NULL;
	for_each_mc_mclock(idev, pmc) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			type = MLD2_CHANGE_TO_EXCLUDE;
		else
			type = MLD2_ALLOW_NEW_SOURCES;
		skb = add_grec(skb, pmc, type, 0, 0, 1);
	}
	if (skb)
		mld_sendpack(skb);
}

void ipv6_mc_dad_complete(struct inet6_dev *idev)
{
	mutex_lock(&idev->mc_lock);
	idev->mc_dad_count = idev->mc_qrv;
	if (idev->mc_dad_count) {
		mld_send_initial_cr(idev);
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_work(idev,
					   unsolicited_report_interval(idev));
	}
	mutex_unlock(&idev->mc_lock);
}

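/* Worker that retransmits the post-DAD unsolicited reports until
 * mc_dad_count is exhausted, pacing them by the unsolicited report
 * interval.
 */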
static void mld_dad_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_dad_work);
	mutex_lock(&idev->mc_lock);
	mld_send_initial_cr(idev);
	if (idev->mc_dad_count) {
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_work(idev,
					   unsolicited_report_interval(idev));
	}
	mutex_unlock(&idev->mc_lock);
	in6_dev_put(idev);
}

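/* Drop one reference on source @psfsrc in @pmc's filter list for the
 * given mode.  When neither mode references the source any longer, the
 * entry is either freed or, if change reports are still owed, moved to
 * the tomb list.  Returns 1 when a change record became necessary.
 */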
static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
			   const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;
	int rv = 0;

	mc_assert_locked(pmc->idev);

	psf_prev = NULL;
	for_each_psf_mclock(pmc, psf) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */
		return -ESRCH;
	}
	WRITE_ONCE(psf->sf_count[sfmode], psf->sf_count[sfmode] - 1);
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
		struct inet6_dev *idev = pmc->idev;

		/* no more filters for this source */
		if (psf_prev)
			rcu_assign_pointer(psf_prev->sf_next,
					   mc_dereference(psf->sf_next, idev));
		else
			rcu_assign_pointer(pmc->mca_sources,
					   mc_dereference(psf->sf_next, idev));

		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
		    !mld_in_v1_mode(idev)) {
			psf->sf_crcount = idev->mc_qrv;
			rcu_assign_pointer(psf->sf_next,
					   mc_dereference(pmc->mca_tomb, idev));
			rcu_assign_pointer(pmc->mca_tomb, psf);
			rv = 1;
		} else {
			kfree_rcu(psf, rcu);
		}
	}
	return rv;
}

static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int changerec = 0;
	int i, err;

	if (!idev)
		return -ENODEV;

	mc_assert_locked(idev);

	for_each_mc_mclock(idev, pmc) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc)
		return -ESRCH;

	sf_markstate(pmc);
	if (!delta) {
		if (!pmc->mca_sfcount[sfmode])
			return -EINVAL;

		pmc->mca_sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->mca_sfcount[MCAST_INCLUDE]) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		pmc->mca_sfmode = MCAST_INCLUDE;
		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for_each_psf_mclock(pmc, psf)
			psf->sf_crcount = 0;
		mld_ifc_event(pmc->idev);
	} else if (sf_setstate(pmc) || changerec) {
		mld_ifc_event(pmc->idev);
	}

	return err;
}

/* Add multicast single-source filter to the interface list */
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
			   const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;

	mc_assert_locked(pmc->idev);

	psf_prev = NULL;
	for_each_psf_mclock(pmc, psf) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_KERNEL);
		if (!psf)
			return -ENOBUFS;

		psf->sf_addr = *psfsrc;
		if (psf_prev) {
			rcu_assign_pointer(psf_prev->sf_next, psf);
		} else {
			rcu_assign_pointer(pmc->mca_sources, psf);
		}
	}
	WRITE_ONCE(psf->sf_count[sfmode], psf->sf_count[sfmode] + 1);
	return 0;
}

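/* Snapshot, in sf_oldin, whether each source currently passes the group's
 * filter, so that sf_setstate() can detect per-source changes after an
 * update.
 */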
static void sf_markstate(struct ifmcaddr6 *pmc)
{
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
	struct ip6_sf_list *psf;

	mc_assert_locked(pmc->idev);

	for_each_psf_mclock(pmc, psf) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else {
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
		}
	}
}

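/* Compare each source's new filter state against the sf_oldin snapshot
 * and arm retransmission counters (and tomb "delete" records) for every
 * source whose state flipped.  Returns the number of changed sources.
 */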
static int sf_setstate(struct ifmcaddr6 *pmc)
{
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
	struct ip6_sf_list *psf, *dpsf;
	int qrv = pmc->idev->mc_qrv;
	int new_in, rv;

	mc_assert_locked(pmc->idev);

	rv = 0;
	for_each_psf_mclock(pmc, psf) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip6_sf_list *prev = NULL;

				for_each_psf_tomb(pmc, dpsf) {
					if (ipv6_addr_equal(&dpsf->sf_addr,
							    &psf->sf_addr))
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						rcu_assign_pointer(prev->sf_next,
								   mc_dereference(dpsf->sf_next,
										  pmc->idev));
					else
						rcu_assign_pointer(pmc->mca_tomb,
								   mc_dereference(dpsf->sf_next,
										  pmc->idev));
					kfree_rcu(dpsf, rcu);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */

			for_each_psf_tomb(pmc, dpsf)
				if (ipv6_addr_equal(&dpsf->sf_addr,
						    &psf->sf_addr))
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_KERNEL);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				rcu_assign_pointer(dpsf->sf_next,
						   mc_dereference(pmc->mca_tomb, pmc->idev));
				rcu_assign_pointer(pmc->mca_tomb, dpsf);
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}

/* Add multicast source filter list to the interface list */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta)
{
	struct ifmcaddr6 *pmc;
	int isexclude;
	int i, err;

	if (!idev)
		return -ENODEV;

	mc_assert_locked(idev);

	for_each_mc_mclock(idev, pmc) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc)
		return -ESRCH;

	sf_markstate(pmc);
	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
	if (!delta)
		WRITE_ONCE(pmc->mca_sfcount[sfmode],
			   pmc->mca_sfcount[sfmode] + 1);
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		if (!delta)
			WRITE_ONCE(pmc->mca_sfcount[sfmode],
				   pmc->mca_sfcount[sfmode] - 1);
		for (j = 0; j < i; j++)
			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			pmc->mca_sfmode = MCAST_EXCLUDE;
		else if (pmc->mca_sfcount[MCAST_INCLUDE])
			pmc->mca_sfmode = MCAST_INCLUDE;
		/* else no filters; keep old mode for reports */

		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		for_each_psf_mclock(pmc, psf)
			psf->sf_crcount = 0;
		mld_ifc_event(idev);
	} else if (sf_setstate(pmc)) {
		mld_ifc_event(idev);
	}
	return err;
}

static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *nextpsf;

	mc_assert_locked(pmc->idev);

	for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
	     psf;
	     psf = nextpsf) {
		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
		kfree_rcu(psf, rcu);
	}
	RCU_INIT_POINTER(pmc->mca_tomb, NULL);
	for (psf = mc_dereference(pmc->mca_sources, pmc->idev);
	     psf;
	     psf = nextpsf) {
		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
		kfree_rcu(psf, rcu);
	}
	RCU_INIT_POINTER(pmc->mca_sources, NULL);
	pmc->mca_sfmode = MCAST_EXCLUDE;
	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
	/* Paired with the READ_ONCE() from ipv6_chk_mcast_addr() */
	WRITE_ONCE(pmc->mca_sfcount[MCAST_EXCLUDE], 1);
}

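/* Send the initial MLDv1 report for a newly joined group and schedule a
 * randomly delayed retransmission, marking ourselves as the last
 * reporter.
 */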
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;

	mc_assert_locked(ma->idev);

	if (ma->mca_flags & MAF_NOREPORT)
		return;

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = get_random_u32_below(unsolicited_report_interval(ma->idev));

	if (cancel_delayed_work(&ma->mca_work)) {
		refcount_dec(&ma->mca_refcnt);
		delay = ma->mca_work.timer.expires - jiffies;
	}

	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
}

static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev)
{
	struct ip6_sf_socklist *psl;
	int err;

	psl = sock_dereference(iml->sflist, sk);

	if (idev)
		mutex_lock(&idev->mc_lock);

	if (!psl) {
		/* any-source empty exclude case */
		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
	} else {
		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
				     psl->sl_count, psl->sl_addr, 0);
		RCU_INIT_POINTER(iml->sflist, NULL);
		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
			   &sk->sk_omem_alloc);
		kfree_rcu(psl, rcu);
	}

	if (idev)
		mutex_unlock(&idev->mc_lock);

	return err;
}

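/* Leaving a group: send a Done message if we were the last MLDv1
 * reporter, or queue an MLDv2 state-change report via the tomb list.
 */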
static void igmp6_leave_group(struct ifmcaddr6 *ma)
{
	mc_assert_locked(ma->idev);

	if (mld_in_v1_mode(ma->idev)) {
		if (ma->mca_flags & MAF_LAST_REPORTER) {
			igmp6_send(&ma->mca_addr, ma->idev->dev,
				   ICMPV6_MGM_REDUCTION);
		}
	} else {
		mld_add_delrec(ma->idev, ma);
		mld_ifc_event(ma->idev);
	}
}

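/* Delayed answer to an MLDv2 general query: report the current state of
 * every group on the interface.
 */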
static void mld_gq_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_gq_work);

	mutex_lock(&idev->mc_lock);
	mld_send_report(idev, NULL);
	idev->mc_gq_running = 0;
	mutex_unlock(&idev->mc_lock);

	in6_dev_put(idev);
}

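/* Worker that transmits the pending interface change reports and
 * reschedules itself until mc_ifc_count retransmissions are done.
 */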
static void mld_ifc_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_ifc_work);

	mutex_lock(&idev->mc_lock);
	mld_send_cr(idev);

	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_work(idev,
					   unsolicited_report_interval(idev));
	}
	mutex_unlock(&idev->mc_lock);
	in6_dev_put(idev);
}

static void mld_ifc_event(struct inet6_dev *idev)
{
	mc_assert_locked(idev);

	if (mld_in_v1_mode(idev))
		return;

	idev->mc_ifc_count = idev->mc_qrv;
	mld_ifc_start_work(idev, 1);
}

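/* Per-group work item: answer a pending query for this address with an
 * MLDv1 report or an MLDv2 current-state record, then clear the timer
 * state.
 */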
static void mld_mca_work(struct work_struct *work)
{
	struct ifmcaddr6 *ma = container_of(to_delayed_work(work),
					    struct ifmcaddr6, mca_work);

	mutex_lock(&ma->idev->mc_lock);
	if (mld_in_v1_mode(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);
	ma->mca_flags |= MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	mutex_unlock(&ma->idev->mc_lock);

	ma_put(ma);
}

/* Device changing type */

void ipv6_mc_unmap(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw the multicast list while the device changes type */

	mutex_lock(&idev->mc_lock);
	for_each_mc_mclock(idev, i)
		igmp6_group_dropped(i);
	mutex_unlock(&idev->mc_lock);
}

void ipv6_mc_remap(struct inet6_dev *idev)
{
	ipv6_mc_up(idev);
}

/* Device going down */
void ipv6_mc_down(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	mutex_lock(&idev->mc_lock);
	/* Withdraw multicast list */
	for_each_mc_mclock(idev, i)
		igmp6_group_dropped(i);
	mutex_unlock(&idev->mc_lock);

	/* The works must be stopped after the group drop, or they would be
	 * rearmed by mld_ifc_event().
	 */
	mld_query_stop_work(idev);
	mld_report_stop_work(idev);

	mutex_lock(&idev->mc_lock);
	mld_ifc_stop_work(idev);
	mld_gq_stop_work(idev);
	mutex_unlock(&idev->mc_lock);

	mld_dad_stop_work(idev);
}

static void ipv6_mc_reset(struct inet6_dev *idev)
{
	idev->mc_qrv = sysctl_mld_qrv;
	idev->mc_qi = MLD_QI_DEFAULT;
	idev->mc_qri = MLD_QRI_DEFAULT;
	idev->mc_v1_seen = 0;
	idev->mc_maxdelay = unsolicited_report_interval(idev);
}

/* Device going up */

void ipv6_mc_up(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	ipv6_mc_reset(idev);
	mutex_lock(&idev->mc_lock);
	for_each_mc_mclock(idev, i) {
		mld_del_delrec(idev, i);
		igmp6_group_added(i);
	}
	mutex_unlock(&idev->mc_lock);
}

/* IPv6 device initialization. */

void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	idev->mc_gq_running = 0;
	INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work);
	RCU_INIT_POINTER(idev->mc_tomb, NULL);
	idev->mc_ifc_count = 0;
	INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
	INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
	INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
	INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
	skb_queue_head_init(&idev->mc_query_queue);
	skb_queue_head_init(&idev->mc_report_queue);
	spin_lock_init(&idev->mc_query_lock);
	spin_lock_init(&idev->mc_report_lock);
	mutex_init(&idev->mc_lock);
	ipv6_mc_reset(idev);
}

/*
 * Device is about to be destroyed: clean up.
 */

void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Deactivate works */
	ipv6_mc_down(idev);
	mutex_lock(&idev->mc_lock);
	mld_clear_delrec(idev);
	mutex_unlock(&idev->mc_lock);
	mld_clear_query(idev);
	mld_clear_report(idev);

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
	 */
	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);

	if (idev->cnf.forwarding)
		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);

	mutex_lock(&idev->mc_lock);
	while ((i = mc_dereference(idev->mc_list, idev))) {
		rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev));

		ip6_mc_clear_src(i);
		ma_put(i);
	}
	mutex_unlock(&idev->mc_lock);
}

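/* NETDEV_RESEND_IGMP handler: re-announce every group, either with MLDv1
 * joins or with a single MLDv2 current-state report.
 */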
static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc;

	mutex_lock(&idev->mc_lock);
	if (mld_in_v1_mode(idev)) {
		for_each_mc_mclock(idev, pmc)
			igmp6_join_group(pmc);
	} else {
		mld_send_report(idev, NULL);
	}
	mutex_unlock(&idev->mc_lock);
}

static int ipv6_mc_netdev_event(struct notifier_block *this,
				unsigned long event,
				void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct inet6_dev *idev = __in6_dev_get(dev);

	switch (event) {
	case NETDEV_RESEND_IGMP:
		if (idev)
			ipv6_mc_rejoin_groups(idev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block igmp6_netdev_notifier = {
	.notifier_call = ipv6_mc_netdev_event,
};

#ifdef CONFIG_PROC_FS
struct igmp6_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
};

#define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private)

static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
{
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (!idev)
			continue;

		im = rcu_dereference(idev->mc_list);
		if (im) {
			state->idev = idev;
			break;
		}
	}
	return im;
}

static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	im = rcu_dereference(im->next);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->idev = NULL;
			break;
		}
		state->idev = __in6_dev_get(state->dev);
		if (!state->idev)
			continue;
		im = rcu_dereference(state->idev->mc_list);
	}
	return im;
}

static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
	if (im)
		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return igmp6_mc_get_idx(seq, *pos);
}

static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);

	++*pos;
	return im;
}

static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	if (likely(state->idev))
		state->idev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
{
	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	seq_printf(seq,
		   "%-4d %-15s %pi6 %5d %08X %ld\n",
		   state->dev->ifindex, state->dev->name,
		   &im->mca_addr,
		   im->mca_users, im->mca_flags,
		   (im->mca_flags & MAF_TIMER_RUNNING) ?
		   jiffies_to_clock_t(im->mca_work.timer.expires - jiffies) : 0);
	return 0;
}

static const struct seq_operations igmp6_mc_seq_ops = {
	.start = igmp6_mc_seq_start,
	.next = igmp6_mc_seq_next,
	.stop = igmp6_mc_seq_stop,
	.show = igmp6_mc_seq_show,
};

struct igmp6_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
	struct ifmcaddr6 *im;
};

#define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private)

static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
{
	struct ip6_sf_list *psf = NULL;
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (unlikely(idev == NULL))
			continue;

		im = rcu_dereference(idev->mc_list);
		if (likely(im)) {
			psf = rcu_dereference(im->mca_sources);
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
		}
	}
	return psf;
}

static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	psf = rcu_dereference(psf->sf_next);
	while (!psf) {
		state->im = rcu_dereference(state->im->next);
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in6_dev_get(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		psf = rcu_dereference(state->im->mca_sources);
	}
out:
	return psf;
}

static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
	if (psf)
		while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_sf_list *psf;
	if (v == SEQ_START_TOKEN)
		psf = igmp6_mcf_get_first(seq);
	else
		psf = igmp6_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (likely(state->im))
		state->im = NULL;
	if (likely(state->idev))
		state->idev = NULL;

	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n");
	} else {
		seq_printf(seq,
			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   &state->im->mca_addr,
			   &psf->sf_addr,
			   READ_ONCE(psf->sf_count[MCAST_INCLUDE]),
			   READ_ONCE(psf->sf_count[MCAST_EXCLUDE]));
	}
	return 0;
}

static const struct seq_operations igmp6_mcf_seq_ops = {
	.start = igmp6_mcf_seq_start,
	.next = igmp6_mcf_seq_next,
	.stop = igmp6_mcf_seq_stop,
	.show = igmp6_mcf_seq_show,
};

static int __net_init igmp6_proc_init(struct net *net)
{
	int err;

	err = -ENOMEM;
	if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
			     sizeof(struct igmp6_mc_iter_state)))
		goto out;
	if (!proc_create_net("mcfilter6", 0444, net->proc_net,
			     &igmp6_mcf_seq_ops,
			     sizeof(struct igmp6_mcf_iter_state)))
		goto out_proc_net_igmp6;

	err = 0;
out:
	return err;

out_proc_net_igmp6:
	remove_proc_entry("igmp6", net->proc_net);
	goto out;
}

static void __net_exit igmp6_proc_exit(struct net *net)
{
	remove_proc_entry("mcfilter6", net->proc_net);
	remove_proc_entry("igmp6", net->proc_net);
}
#else
static inline int igmp6_proc_init(struct net *net)
{
	return 0;
}
static inline void igmp6_proc_exit(struct net *net)
{
}
#endif

static int __net_init igmp6_net_init(struct net *net)
{
	int err;

	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
		       err);
		goto out;
	}

	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
	net->ipv6.igmp_sk->sk_allocation = GFP_KERNEL;

	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
		       err);
		goto out_sock_create;
	}

	err = igmp6_proc_init(net);
	if (err)
		goto out_sock_create_autojoin;

	return 0;

out_sock_create_autojoin:
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
out_sock_create:
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
out:
	return err;
}

static void __net_exit igmp6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
	igmp6_proc_exit(net);
}

static struct pernet_operations igmp6_net_ops = {
	.init = igmp6_net_init,
	.exit = igmp6_net_exit,
};

int __init igmp6_init(void)
{
	int err;

	err = register_pernet_subsys(&igmp6_net_ops);
	if (err)
		return err;

	mld_wq = create_workqueue("mld");
	if (!mld_wq) {
		unregister_pernet_subsys(&igmp6_net_ops);
		return -ENOMEM;
	}

	return err;
}

int __init igmp6_late_init(void)
{
	return register_netdevice_notifier(&igmp6_netdev_notifier);
}

void igmp6_cleanup(void)
{
	unregister_pernet_subsys(&igmp6_net_ops);
	destroy_workqueue(mld_wq);
}

void igmp6_late_cleanup(void)
{
	unregister_netdevice_notifier(&igmp6_netdev_notifier);
}