/* Netfilter messages via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
 * (C) 2005-2017 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial netfilter messages via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>
#include <linux/sched/signal.h>

#include <net/netlink.h>
#include <net/netns/generic.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
MODULE_DESCRIPTION("Netfilter messages via netlink socket");

#define nfnl_dereference_protected(id) \
	rcu_dereference_protected(table[(id)].subsys, \
				  lockdep_nfnl_is_held((id)))

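/* Upper bound on the attribute count a single callback may declare; it
 * sizes the on-stack nlattr pointer arrays used while parsing in the
 * receive paths below.
 */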
#define NFNL_MAX_ATTR_COUNT	32

static unsigned int nfnetlink_pernet_id __read_mostly;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static DEFINE_SPINLOCK(nfnl_grp_active_lock);
#endif

struct nfnl_net {
	struct sock *nfnl;
};

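/* Per-subsystem registration slots: writers take the per-subsystem mutex,
 * the receive path reads the subsys pointer under RCU (see
 * nfnl_dereference_protected() above).
 */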
static struct {
	struct mutex mutex;
	const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];

static struct lock_class_key nfnl_lockdep_keys[NFNL_SUBSYS_COUNT];

static const char *const nfnl_lockdep_names[NFNL_SUBSYS_COUNT] = {
	[NFNL_SUBSYS_NONE] = "nfnl_subsys_none",
	[NFNL_SUBSYS_CTNETLINK] = "nfnl_subsys_ctnetlink",
	[NFNL_SUBSYS_CTNETLINK_EXP] = "nfnl_subsys_ctnetlink_exp",
	[NFNL_SUBSYS_QUEUE] = "nfnl_subsys_queue",
	[NFNL_SUBSYS_ULOG] = "nfnl_subsys_ulog",
	[NFNL_SUBSYS_OSF] = "nfnl_subsys_osf",
	[NFNL_SUBSYS_IPSET] = "nfnl_subsys_ipset",
	[NFNL_SUBSYS_ACCT] = "nfnl_subsys_acct",
	[NFNL_SUBSYS_CTNETLINK_TIMEOUT] = "nfnl_subsys_cttimeout",
	[NFNL_SUBSYS_CTHELPER] = "nfnl_subsys_cthelper",
	[NFNL_SUBSYS_NFTABLES] = "nfnl_subsys_nftables",
	[NFNL_SUBSYS_NFT_COMPAT] = "nfnl_subsys_nftcompat",
	[NFNL_SUBSYS_HOOK] = "nfnl_subsys_hook",
};

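/* Maps each multicast group to the subsystem that emits it; used by
 * nfnetlink_bind() to autoload the matching module and by the conntrack
 * listener bookkeeping below.
 */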
static const int nfnl_group2type[NFNLGRP_MAX+1] = {
	[NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_UPDATE] = NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_DESTROY] = NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_NFTABLES] = NFNL_SUBSYS_NFTABLES,
	[NFNLGRP_ACCT_QUOTA] = NFNL_SUBSYS_ACCT,
	[NFNLGRP_NFTRACE] = NFNL_SUBSYS_NFTABLES,
};

static struct nfnl_net *nfnl_pernet(struct net *net)
{
	return net_generic(net, nfnetlink_pernet_id);
}

void nfnl_lock(__u8 subsys_id)
{
	mutex_lock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_lock);

void nfnl_unlock(__u8 subsys_id)
{
	mutex_unlock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_unlock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(u8 subsys_id)
{
	return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif

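/* Illustrative registration sketch (not part of this file): a subsystem
 * fills in a callback table and registers it once, typically from module
 * init.  Field names follow their uses in this file; the exact struct
 * layout lives in <linux/netfilter/nfnetlink.h>.  The example_* names and
 * EXAMPLE_ATTR_MAX are placeholders, not real identifiers.
 *
 *	static const struct nfnl_callback example_cb[] = {
 *		[0] = {
 *			.call		= example_handler,
 *			.type		= NFNL_CB_MUTEX,
 *			.attr_count	= EXAMPLE_ATTR_MAX,
 *			.policy		= example_policy,
 *		},
 *	};
 *
 *	static const struct nfnetlink_subsystem example_subsys = {
 *		.subsys_id	= NFNL_SUBSYS_NONE,	(placeholder id)
 *		.cb_count	= ARRAY_SIZE(example_cb),
 *		.cb		= example_cb,
 *	};
 *
 *	err = nfnetlink_subsys_register(&example_subsys);
 *
 * Subsystems that accept batches additionally provide ->commit, ->abort,
 * ->valid_genid and ->owner, which nfnetlink_rcv_batch() requires.
 */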
int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
	u8 cb_id;

	/* Sanity-check attr_count size to avoid stack buffer overflow. */
	for (cb_id = 0; cb_id < n->cb_count; cb_id++)
		if (WARN_ON(n->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT))
			return -EINVAL;

	nfnl_lock(n->subsys_id);
	if (table[n->subsys_id].subsys) {
		nfnl_unlock(n->subsys_id);
		return -EBUSY;
	}
	rcu_assign_pointer(table[n->subsys_id].subsys, n);
	nfnl_unlock(n->subsys_id);

	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);

int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	table[n->subsys_id].subsys = NULL;
	nfnl_unlock(n->subsys_id);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);

static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
{
	u8 subsys_id = NFNL_SUBSYS_ID(type);

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return NULL;

	return rcu_dereference(table[subsys_id].subsys);
}

static inline const struct nfnl_callback *
nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
{
	u8 cb_id = NFNL_MSG_TYPE(type);

	if (cb_id >= ss->cb_count)
		return NULL;

	return &ss->cb[cb_id];
}

int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return netlink_has_listeners(nfnlnet->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);

int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
		   unsigned int group, int echo, gfp_t flags)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return nlmsg_notify(nfnlnet->nfnl, skb, portid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);

int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return netlink_set_err(nfnlnet->nfnl, portid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);

int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);
	int err;

	err = nlmsg_unicast(nfnlnet->nfnl, skb, portid);
	if (err == -EAGAIN)
		err = -ENOBUFS;

	return err;
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);

void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid,
			 __u32 group, gfp_t allocation)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	netlink_broadcast(nfnlnet->nfnl, skb, portid, group, allocation);
}
EXPORT_SYMBOL_GPL(nfnetlink_broadcast);

/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	const struct nfnl_callback *nc;
	const struct nfnetlink_subsystem *ss;
	int type, err;

	/* All the messages must at least contain nfgenmsg */
	if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
		return 0;

	type = nlh->nlmsg_type;
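	/* A callback that returns -EAGAIN (e.g. the subsystem was replaced
	 * underneath us) restarts subsystem lookup and dispatch from here.
	 */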
replay:
	rcu_read_lock();

	ss = nfnetlink_get_subsys(type);
	if (!ss) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
		rcu_read_lock();
		ss = nfnetlink_get_subsys(type);
		if (!ss)
#endif
		{
			rcu_read_unlock();
			return -EINVAL;
		}
	}

	nc = nfnetlink_find_client(type, ss);
	if (!nc) {
		rcu_read_unlock();
		return -EINVAL;
	}

	{
		int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
		struct nfnl_net *nfnlnet = nfnl_pernet(net);
		u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
		struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
		struct nlattr *attr = (void *)nlh + min_len;
		int attrlen = nlh->nlmsg_len - min_len;
		__u8 subsys_id = NFNL_SUBSYS_ID(type);
		struct nfnl_info info = {
			.net = net,
			.sk = nfnlnet->nfnl,
			.nlh = nlh,
			.nfmsg = nlmsg_data(nlh),
			.extack = extack,
		};

		/* Sanity-check NFNL_MAX_ATTR_COUNT */
		if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		err = nla_parse_deprecated(cda, ss->cb[cb_id].attr_count,
					   attr, attrlen,
					   ss->cb[cb_id].policy, extack);
		if (err < 0) {
			rcu_read_unlock();
			return err;
		}

		if (!nc->call) {
			rcu_read_unlock();
			return -EINVAL;
		}

		switch (nc->type) {
		case NFNL_CB_RCU:
			err = nc->call(skb, &info, (const struct nlattr **)cda);
			rcu_read_unlock();
			break;
		case NFNL_CB_MUTEX:
			rcu_read_unlock();
			nfnl_lock(subsys_id);
			if (nfnl_dereference_protected(subsys_id) != ss ||
			    nfnetlink_find_client(type, ss) != nc) {
				nfnl_unlock(subsys_id);
				err = -EAGAIN;
				break;
			}
			err = nc->call(skb, &info, (const struct nlattr **)cda);
			nfnl_unlock(subsys_id);
			break;
		default:
			rcu_read_unlock();
			err = -EINVAL;
			break;
		}
		if (err == -EAGAIN)
			goto replay;
		return err;
	}
}

struct nfnl_err {
	struct list_head head;
	struct nlmsghdr *nlh;
	int err;
	struct netlink_ext_ack extack;
};

static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err,
			const struct netlink_ext_ack *extack)
{
	struct nfnl_err *nfnl_err;

	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
	if (nfnl_err == NULL)
		return -ENOMEM;

	nfnl_err->nlh = nlh;
	nfnl_err->err = err;
	nfnl_err->extack = *extack;
	list_add_tail(&nfnl_err->head, list);

	return 0;
}

static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
	list_del(&nfnl_err->head);
	kfree(nfnl_err);
}

static void nfnl_err_reset(struct list_head *err_list)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head)
		nfnl_err_del(nfnl_err);
}

static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err,
			    &nfnl_err->extack);
		nfnl_err_del(nfnl_err);
	}
}

enum {
	NFNL_BATCH_FAILURE = (1 << 0),
	NFNL_BATCH_DONE = (1 << 1),
	NFNL_BATCH_REPLAY = (1 << 2),
};

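/* Process a batch: the messages between NFNL_MSG_BATCH_BEGIN and
 * NFNL_MSG_BATCH_END are dispatched to NFNL_CB_BATCH callbacks of a single
 * subsystem; ->commit() runs only if the whole batch was processed without
 * failure, otherwise ->abort() rolls it back.  Per-message errors are
 * queued on err_list and delivered after the batch, so a replay does not
 * report them twice.
 */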
static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
				u16 subsys_id, u32 genid)
{
	struct sk_buff *oskb = skb;
	struct net *net = sock_net(skb->sk);
	const struct nfnetlink_subsystem *ss;
	const struct nfnl_callback *nc;
	struct netlink_ext_ack extack;
	struct nlmsghdr *onlh = nlh;
	LIST_HEAD(err_list);
	u32 status;
	int err;

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return netlink_ack(skb, nlh, -EINVAL, NULL);
replay:
	status = 0;
replay_abort:
	skb = netlink_skb_clone(oskb, GFP_KERNEL);
	nlh = onlh;
	if (!skb)
		return netlink_ack(oskb, nlh, -ENOMEM, NULL);

	nfnl_lock(subsys_id);
	ss = nfnl_dereference_protected(subsys_id);
	if (!ss) {
#ifdef CONFIG_MODULES
		nfnl_unlock(subsys_id);
		request_module("nfnetlink-subsys-%d", subsys_id);
		nfnl_lock(subsys_id);
		ss = nfnl_dereference_protected(subsys_id);
		if (!ss)
#endif
		{
			nfnl_unlock(subsys_id);
			netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
			return consume_skb(skb);
		}
	}

	if (!ss->valid_genid || !ss->commit || !ss->abort) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return consume_skb(skb);
	}

	if (!try_module_get(ss->owner)) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return consume_skb(skb);
	}

	if (!ss->valid_genid(net, genid)) {
		module_put(ss->owner);
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -ERESTART, NULL);
		return consume_skb(skb);
	}

	nfnl_unlock(subsys_id);

	if (nlh->nlmsg_flags & NLM_F_ACK) {
		memset(&extack, 0, sizeof(extack));
		nfnl_err_add(&err_list, nlh, 0, &extack);
	}

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen, type;

		if (fatal_signal_pending(current)) {
			nfnl_err_reset(&err_list);
			err = -EINTR;
			status = NFNL_BATCH_FAILURE;
			goto done;
		}

		memset(&extack, 0, sizeof(extack));
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN ||
		    skb->len < nlh->nlmsg_len ||
		    nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		}

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
			err = -EINVAL;
			goto ack;
		}

		type = nlh->nlmsg_type;
		if (type == NFNL_MSG_BATCH_BEGIN) {
			/* Malformed: Batch begin twice */
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		} else if (type == NFNL_MSG_BATCH_END) {
			status |= NFNL_BATCH_DONE;
			goto done;
		} else if (type < NLMSG_MIN_TYPE) {
			err = -EINVAL;
			goto ack;
		}

		/* We only accept a batch with messages for the same
		 * subsystem.
		 */
		if (NFNL_SUBSYS_ID(type) != subsys_id) {
			err = -EINVAL;
			goto ack;
		}

		nc = nfnetlink_find_client(type, ss);
		if (!nc) {
			err = -EINVAL;
			goto ack;
		}

		if (nc->type != NFNL_CB_BATCH) {
			err = -EINVAL;
			goto ack;
		}

		{
			int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
			struct nfnl_net *nfnlnet = nfnl_pernet(net);
			struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
			struct nlattr *attr = (void *)nlh + min_len;
			u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
			int attrlen = nlh->nlmsg_len - min_len;
			struct nfnl_info info = {
				.net = net,
				.sk = nfnlnet->nfnl,
				.nlh = nlh,
				.nfmsg = nlmsg_data(nlh),
				.extack = &extack,
			};

			/* Sanity-check NFNL_MAX_ATTR_COUNT */
			if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
				err = -ENOMEM;
				goto ack;
			}

			err = nla_parse_deprecated(cda,
						   ss->cb[cb_id].attr_count,
						   attr, attrlen,
						   ss->cb[cb_id].policy, &extack);
			if (err < 0)
				goto ack;

			err = nc->call(skb, &info, (const struct nlattr **)cda);

			/* The lock was released to autoload some module, we
			 * have to abort and start from scratch using the
			 * original skb.
			 */
			if (err == -EAGAIN) {
				status |= NFNL_BATCH_REPLAY;
				goto done;
			}
		}
ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
			/* Errors are delivered once the full batch has been
			 * processed, this avoids that the same error is
			 * reported several times when replaying the batch.
			 */
			if (err == -ENOMEM ||
			    nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
				/* We failed to enqueue an error, reset the
				 * list of errors and send OOM to userspace
				 * pointing to the batch header.
				 */
				nfnl_err_reset(&err_list);
				netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM,
					    NULL);
				status |= NFNL_BATCH_FAILURE;
				goto done;
			}
			/* We don't stop processing the batch on errors, thus,
			 * userspace gets all the errors that the batch
			 * triggers.
			 */
			if (err)
				status |= NFNL_BATCH_FAILURE;
		}

		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}
done:
	if (status & NFNL_BATCH_REPLAY) {
		ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD);
		nfnl_err_reset(&err_list);
		consume_skb(skb);
		module_put(ss->owner);
		goto replay;
	} else if (status == NFNL_BATCH_DONE) {
		err = ss->commit(net, oskb);
		if (err == -EAGAIN) {
			status |= NFNL_BATCH_REPLAY;
			goto done;
		} else if (err) {
			ss->abort(net, oskb, NFNL_ABORT_NONE);
			netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
		} else if (nlh->nlmsg_flags & NLM_F_ACK) {
			memset(&extack, 0, sizeof(extack));
			nfnl_err_add(&err_list, nlh, 0, &extack);
		}
	} else {
		enum nfnl_abort_action abort_action;

		if (status & NFNL_BATCH_FAILURE)
			abort_action = NFNL_ABORT_NONE;
		else
			abort_action = NFNL_ABORT_VALIDATE;

		err = ss->abort(net, oskb, abort_action);
		if (err == -EAGAIN) {
			nfnl_err_reset(&err_list);
			consume_skb(skb);
			module_put(ss->owner);
			status |= NFNL_BATCH_FAILURE;
			goto replay_abort;
		}
	}

	nfnl_err_deliver(&err_list, oskb);
	consume_skb(skb);
	module_put(ss->owner);
}

static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
	[NFNL_BATCH_GENID] = { .type = NLA_U32 },
};

static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
	struct nlattr *attr = (void *)nlh + min_len;
	struct nlattr *cda[NFNL_BATCH_MAX + 1];
	int attrlen = nlh->nlmsg_len - min_len;
	struct nfgenmsg *nfgenmsg;
	int msglen, err;
	u32 gen_id = 0;
	u16 res_id;

	msglen = NLMSG_ALIGN(nlh->nlmsg_len);
	if (msglen > skb->len)
		msglen = skb->len;

	if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
		return;

	err = nla_parse_deprecated(cda, NFNL_BATCH_MAX, attr, attrlen,
				   nfnl_batch_policy, NULL);
	if (err < 0) {
		netlink_ack(skb, nlh, err, NULL);
		return;
	}
	if (cda[NFNL_BATCH_GENID])
		gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));

	nfgenmsg = nlmsg_data(nlh);
	skb_pull(skb, msglen);
	/* Work around old nft using host byte order */
	if (nfgenmsg->res_id == (__force __be16)NFNL_SUBSYS_NFTABLES)
		res_id = NFNL_SUBSYS_NFTABLES;
	else
		res_id = ntohs(nfgenmsg->res_id);

	nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
}

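/* Top-level input callback for the NETLINK_NETFILTER socket.  A batch as
 * sent by userspace is framed roughly as follows (sketch, attribute
 * details omitted):
 *
 *	NFNL_MSG_BATCH_BEGIN	nfgenmsg.res_id = target subsystem,
 *				optional NFNL_BATCH_GENID attribute
 *	<subsystem messages>	all with NLM_F_REQUEST set
 *	NFNL_MSG_BATCH_END
 *
 * Everything else is dispatched one message at a time through
 * nfnetlink_rcv_msg().
 */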
static void nfnetlink_rcv(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);

	if (skb->len < NLMSG_HDRLEN ||
	    nlh->nlmsg_len < NLMSG_HDRLEN ||
	    skb->len < nlh->nlmsg_len)
		return;

	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
		netlink_ack(skb, nlh, -EPERM, NULL);
		return;
	}

	if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
		nfnetlink_rcv_skb_batch(skb, nlh);
	else
		netlink_rcv_skb(skb, nfnetlink_rcv_msg);
}

static void nfnetlink_bind_event(struct net *net, unsigned int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	int type, group_bit;
	u8 v;

	/* All NFNLGRP_CONNTRACK_* group bits fit into u8.
	 * The other groups are not relevant and can be ignored.
	 */
	if (group >= 8)
		return;

	type = nfnl_group2type[group];

	switch (type) {
	case NFNL_SUBSYS_CTNETLINK:
		break;
	case NFNL_SUBSYS_CTNETLINK_EXP:
		break;
	default:
		return;
	}

	group_bit = (1 << group);

	spin_lock(&nfnl_grp_active_lock);
	v = READ_ONCE(nf_ctnetlink_has_listener);
	if ((v & group_bit) == 0) {
		v |= group_bit;

		/* read concurrently without nfnl_grp_active_lock held. */
		WRITE_ONCE(nf_ctnetlink_has_listener, v);
	}

	spin_unlock(&nfnl_grp_active_lock);
#endif
}

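/* Autoload the subsystem behind a multicast group when a listener binds,
 * and keep the conntrack listener bitmap up to date.
 */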
static int nfnetlink_bind(struct net *net, int group)
{
	const struct nfnetlink_subsystem *ss;
	int type;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return 0;

	type = nfnl_group2type[group];

	rcu_read_lock();
	ss = nfnetlink_get_subsys(type << 8);
	rcu_read_unlock();
	if (!ss)
		request_module_nowait("nfnetlink-subsys-%d", type);

	nfnetlink_bind_event(net, group);
	return 0;
}

static void nfnetlink_unbind(struct net *net, int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	int type, group_bit;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return;

	type = nfnl_group2type[group];

	switch (type) {
	case NFNL_SUBSYS_CTNETLINK:
		break;
	case NFNL_SUBSYS_CTNETLINK_EXP:
		break;
	default:
		return;
	}

	/* ctnetlink_has_listener is u8 */
	if (group >= 8)
		return;

	group_bit = (1 << group);

	spin_lock(&nfnl_grp_active_lock);
	if (!nfnetlink_has_listeners(net, group)) {
		u8 v = READ_ONCE(nf_ctnetlink_has_listener);

		v &= ~group_bit;

		/* read concurrently without nfnl_grp_active_lock held. */
		WRITE_ONCE(nf_ctnetlink_has_listener, v);
	}
	spin_unlock(&nfnl_grp_active_lock);
#endif
}

static int __net_init nfnetlink_net_init(struct net *net)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);
	struct netlink_kernel_cfg cfg = {
		.groups = NFNLGRP_MAX,
		.input = nfnetlink_rcv,
		.bind = nfnetlink_bind,
		.unbind = nfnetlink_unbind,
	};

	nfnlnet->nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
	if (!nfnlnet->nfnl)
		return -ENOMEM;
	return 0;
}

static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
	struct nfnl_net *nfnlnet;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		nfnlnet = nfnl_pernet(net);

		netlink_kernel_release(nfnlnet->nfnl);
	}
}

static struct pernet_operations nfnetlink_net_ops = {
	.init = nfnetlink_net_init,
	.exit_batch = nfnetlink_net_exit_batch,
	.id = &nfnetlink_pernet_id,
	.size = sizeof(struct nfnl_net),
};

static int __init nfnetlink_init(void)
{
	int i;

	for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
		BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);

	for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
		__mutex_init(&table[i].mutex, nfnl_lockdep_names[i], &nfnl_lockdep_keys[i]);

	return register_pernet_subsys(&nfnetlink_net_ops);
}

static void __exit nfnetlink_exit(void)
{
	unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);