// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>
#include <net/page_pool/memory_provider.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

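/* Dump state kept in cb->ctx across resumed netlink dumps; each index
 * records where the previous pass stopped so the next pass can resume.
 */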
struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	netdev_assert_locked(netdev); /* note: rtnl_lock may not be held! */

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	/* Expand the XDP_METADATA_KFUNC_xxx X-macro list into one feature
	 * check per metadata kfunc the driver implements.
	 */
#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
		if (netdev->xsk_tx_metadata_ops->tmo_request_launch_time)
			xsk_features |= NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD))
		goto err_cancel_msg;

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs))
			goto err_cancel_msg;
	}

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
	if (!netdev) {
		err = -ENODEV;
		goto err_free_msg;
	}

	err = netdev_nl_dev_fill(netdev, rsp, info);
	netdev_unlock(netdev);

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	int err;

	for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			return err;
	}

	return 0;
}

static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
			const struct genl_info *info)
{
	unsigned long irq_suspend_timeout;
	unsigned long gro_flush_timeout;
	u32 napi_defer_hard_irqs;
	void *hdr;
	pid_t pid;

	if (!napi->dev->up)
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
		goto nla_put_failure;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
		goto nla_put_failure;

	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
		goto nla_put_failure;

	if (nla_put_uint(rsp, NETDEV_A_NAPI_THREADED,
			 napi_get_threaded(napi)))
		goto nla_put_failure;

	if (napi->thread) {
		pid = task_pid_nr(napi->thread);
		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
			goto nla_put_failure;
	}

	napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
	if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
			napi_defer_hard_irqs))
		goto nla_put_failure;

	irq_suspend_timeout = napi_get_irq_suspend_timeout(napi);
	if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
			 irq_suspend_timeout))
		goto nla_put_failure;

	gro_flush_timeout = napi_get_gro_flush_timeout(napi);
	if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
			 gro_flush_timeout))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	struct sk_buff *rsp;
	u32 napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id);
	if (napi) {
		err = netdev_nl_napi_fill_one(rsp, napi, info);
		netdev_unlock(napi->dev);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	if (err) {
		goto err_free_msg;
	} else if (!rsp->len) {
		err = -ENOENT;
		goto err_free_msg;
	}

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			const struct genl_info *info,
			struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	unsigned int prev_id;
	int err = 0;

	if (!netdev->up)
		return err;

	prev_id = UINT_MAX;
	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		if (!napi_id_valid(napi->napi_id))
			continue;

		/* Dump continuation below depends on the list being sorted */
		WARN_ON_ONCE(napi->napi_id >= prev_id);
		prev_id = napi->napi_id;

		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;
	}
	return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

	if (ifindex) {
		netdev = netdev_get_by_index_lock(net, ifindex);
		if (netdev) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			netdev_unlock(netdev);
		} else {
			err = -ENODEV;
		}
	} else {
		for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->napi_id = 0;
		}
	}

	return err;
}

static int
netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
{
	u64 irq_suspend_timeout = 0;
	u64 gro_flush_timeout = 0;
	u8 threaded = 0;
	u32 defer = 0;

	if (info->attrs[NETDEV_A_NAPI_THREADED]) {
		int ret;

		threaded = nla_get_uint(info->attrs[NETDEV_A_NAPI_THREADED]);
		ret = napi_set_threaded(napi, threaded);
		if (ret)
			return ret;
	}

	if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
		defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
		napi_set_defer_hard_irqs(napi, defer);
	}

	if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) {
		irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]);
		napi_set_irq_suspend_timeout(napi, irq_suspend_timeout);
	}

	if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) {
		gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]);
		napi_set_gro_flush_timeout(napi, gro_flush_timeout);
	}

	return 0;
}

int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	unsigned int napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id);
	if (napi) {
		err = netdev_nl_napi_set_config(napi, info);
		netdev_unlock(napi->dev);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	return err;
}

static int nla_put_napi_id(struct sk_buff *skb, const struct napi_struct *napi)
{
	if (napi && napi_id_valid(napi->napi_id))
		return nla_put_u32(skb, NETDEV_A_QUEUE_NAPI_ID, napi->napi_id);
	return 0;
}

static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct pp_memory_provider_params *params;
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (nla_put_napi_id(rsp, rxq->napi))
			goto nla_put_failure;

		params = &rxq->mp_params;
		if (params->mp_ops &&
		    params->mp_ops->nl_fill(params->mp_priv, rsp, rxq))
			goto nla_put_failure;
#ifdef CONFIG_XDP_SOCKETS
		if (rxq->pool)
			if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK))
				goto nla_put_failure;
#endif

		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (nla_put_napi_id(rsp, txq->napi))
			goto nla_put_failure;
#ifdef CONFIG_XDP_SOCKETS
		if (txq->pool)
			if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK))
				goto nla_put_failure;
#endif
		break;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err;

	if (!netdev->up)
		return -ENOENT;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	netdev = netdev_get_by_index_lock_ops_compat(genl_info_net(info),
						     ifindex);
	if (netdev) {
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
		netdev_unlock_ops_compat(netdev);
	} else {
		err = -ENODEV;
	}

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;

	if (!netdev->up)
		return err;

	for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
	}
	for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
	}

	return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	if (ifindex) {
		netdev = netdev_get_by_index_lock_ops_compat(net, ifindex);
		if (netdev) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			netdev_unlock_ops_compat(netdev);
		} else {
			err = -ENODEV;
		}
	} else {
		for_each_netdev_lock_ops_compat_scoped(net, netdev,
						       ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}

	return err;
}

#define NETDEV_STAT_NOT_SET		(~0ULL)
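
/* Queue stats structs are pre-filled with 0xff bytes before the driver
 * callback runs; any u64 counter the driver leaves untouched therefore
 * reads as NETDEV_STAT_NOT_SET and is skipped both when summing and
 * when emitting netlink attributes.
 */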

static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
	const u64 *add = _add;
	u64 *sum = _sum;

	while (size) {
		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
	if (value == NETDEV_STAT_NOT_SET)
		return 0;
	return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_COMPLETE, rx->csum_complete) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
		      u32 q_type, int i, const struct genl_info *info)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		memset(&rx, 0xff, sizeof(rx));
		ops->get_queue_stats_rx(netdev, i, &rx);
		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_rx(rsp, &rx))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		memset(&tx, 0xff, sizeof(tx));
		ops->get_queue_stats_tx(netdev, i, &tx);
		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_tx(rsp, &tx))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(rsp, hdr);
	return 0;

nla_cancel:
	genlmsg_cancel(rsp, hdr);
	return 0;
nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	int i, err;

	if (!(netdev->flags & IFF_UP))
		return 0;

	i = ctx->rxq_idx;
	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
					    i, info);
		if (err)
			return err;
		ctx->rxq_idx = ++i;
	}
	i = ctx->txq_idx;
	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
					    i, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	ctx->rxq_idx = 0;
	ctx->txq_idx = 0;
	return 0;
}

/**
 * netdev_stat_queue_sum() - add up queue stats from range of queues
 * @netdev: net_device
 * @rx_start: index of the first Rx queue to query
 * @rx_end: index after the last Rx queue (first *not* to query)
 * @rx_sum: output Rx stats, should be already initialized
 * @tx_start: index of the first Tx queue to query
 * @tx_end: index after the last Tx queue (first *not* to query)
 * @tx_sum: output Tx stats, should be already initialized
 *
 * Add stats from [start, end) range of queue IDs to *x_sum structs.
 * The sum structs must be already initialized. Usually this
 * helper is invoked from the .get_base_stats callbacks of drivers
 * to account for stats of disabled queues. In that case the ranges
 * are usually [netdev->real_num_*x_queues, netdev->num_*x_queues).
 */
void netdev_stat_queue_sum(struct net_device *netdev,
			   int rx_start, int rx_end,
			   struct netdev_queue_stats_rx *rx_sum,
			   int tx_start, int tx_end,
			   struct netdev_queue_stats_tx *tx_sum)
{
	const struct netdev_stat_ops *ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	int i;

	ops = netdev->stat_ops;

	for (i = rx_start; i < rx_end; i++) {
		memset(&rx, 0xff, sizeof(rx));
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		netdev_nl_stats_add(rx_sum, &rx, sizeof(rx));
	}
	for (i = tx_start; i < tx_end; i++) {
		memset(&tx, 0xff, sizeof(tx));
		if (ops->get_queue_stats_tx)
			ops->get_queue_stats_tx(netdev, i, &tx);
		netdev_nl_stats_add(tx_sum, &tx, sizeof(tx));
	}
}
EXPORT_SYMBOL(netdev_stat_queue_sum);
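
/* Example (hypothetical driver, for illustration only): a driver whose
 * active queue count can shrink at runtime might fold the stats of its
 * currently-disabled queues into the base stats roughly like so:
 *
 *	static void foo_get_base_stats(struct net_device *netdev,
 *				       struct netdev_queue_stats_rx *rx,
 *				       struct netdev_queue_stats_tx *tx)
 *	{
 *		rx->packets = 0;
 *		rx->bytes = 0;
 *		tx->packets = 0;
 *		tx->bytes = 0;
 *
 *		netdev_stat_queue_sum(netdev,
 *				      netdev->real_num_rx_queues,
 *				      netdev->num_rx_queues, rx,
 *				      netdev->real_num_tx_queues,
 *				      netdev->num_tx_queues, tx);
 *	}
 */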

static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
			  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum;
	struct netdev_queue_stats_tx tx_sum;
	void *hdr;

	/* Netdev can't guarantee any complete counters */
	if (!netdev->stat_ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));
	memset(&tx_sum, 0xff, sizeof(tx_sum));

	netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	/* The op was there, but nothing reported, don't bother */
	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum,
			      0, netdev->real_num_tx_queues, &tx_sum);

	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
	    netdev_nl_stats_write_tx(rsp, &tx_sum))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
			      struct sk_buff *skb, const struct genl_info *info,
			      struct netdev_nl_dump_ctx *ctx)
{
	if (!netdev->stat_ops)
		return 0;

	switch (scope) {
	case 0:
		return netdev_nl_stats_by_netdev(netdev, skb, info);
	case NETDEV_QSTATS_SCOPE_QUEUE:
		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
	}

	return -EINVAL;	/* Should not happen, per netlink policy */
}

int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	unsigned int ifindex;
	unsigned int scope;
	int err = 0;

	scope = 0;
	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

	ifindex = 0;
	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

	if (ifindex) {
		netdev = netdev_get_by_index_lock_ops_compat(net, ifindex);
		if (!netdev) {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			return -ENODEV;
		}
		if (netdev->stat_ops) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
		} else {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			err = -EOPNOTSUPP;
		}
		netdev_unlock_ops_compat(netdev);
		return err;
	}

	for_each_netdev_lock_ops_compat_scoped(net, netdev, ctx->ifindex) {
		err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
						    info, ctx);
		if (err < 0)
			break;
	}

	return err;
}

static int netdev_nl_read_rxq_bitmap(struct genl_info *info,
				     u32 rxq_bitmap_len,
				     unsigned long *rxq_bitmap)
{
	const int maxtype = ARRAY_SIZE(netdev_queue_id_nl_policy) - 1;
	struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
	struct nlattr *attr;
	int rem, err = 0;
	u32 rxq_idx;

	nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		err = nla_parse_nested(tb, maxtype, attr,
				       netdev_queue_id_nl_policy, info->extack);
		if (err < 0)
			return err;

		if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
		    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE))
			return -EINVAL;

		if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
			return -EINVAL;
		}

		rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
		if (rxq_idx >= rxq_bitmap_len) {
			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_ID]);
			return -EINVAL;
		}

		bitmap_set(rxq_bitmap, rxq_idx, 1);
	}

	return 0;
}
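
/* The request parsed above carries one NETDEV_A_DMABUF_QUEUES nest per
 * queue, each holding a NETDEV_A_QUEUE_ID and a NETDEV_A_QUEUE_TYPE
 * attribute. Only NETDEV_QUEUE_TYPE_RX entries are accepted; every
 * valid ID sets the matching bit in rxq_bitmap.
 */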

static struct device *
netdev_nl_get_dma_dev(struct net_device *netdev, unsigned long *rxq_bitmap,
		      struct netlink_ext_ack *extack)
{
	struct device *dma_dev = NULL;
	u32 rxq_idx, prev_rxq_idx;

	for_each_set_bit(rxq_idx, rxq_bitmap, netdev->real_num_rx_queues) {
		struct device *rxq_dma_dev;

		rxq_dma_dev = netdev_queue_get_dma_dev(netdev, rxq_idx);
		if (dma_dev && rxq_dma_dev != dma_dev) {
			NL_SET_ERR_MSG_FMT(extack, "DMA device mismatch between queue %u and %u (multi-PF device?)",
					   rxq_idx, prev_rxq_idx);
			return ERR_PTR(-EOPNOTSUPP);
		}

		dma_dev = rxq_dma_dev;
		prev_rxq_idx = rxq_idx;
	}

	return dma_dev;
}

int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_devmem_dmabuf_binding *binding;
	u32 ifindex, dmabuf_fd, rxq_idx;
	struct netdev_nl_sock *priv;
	struct net_device *netdev;
	unsigned long *rxq_bitmap;
	struct device *dma_dev;
	struct sk_buff *rsp;
	int err = 0;
	void *hdr;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

	priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_genlmsg_free;
	}

	mutex_lock(&priv->lock);

	err = 0;
	netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
	if (!netdev) {
		err = -ENODEV;
		goto err_unlock_sock;
	}
	if (!netif_device_present(netdev))
		err = -ENODEV;
	else if (!netdev_need_ops_lock(netdev))
		err = -EOPNOTSUPP;
	if (err) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NETDEV_A_DEV_IFINDEX]);
		goto err_unlock;
	}

	rxq_bitmap = bitmap_zalloc(netdev->real_num_rx_queues, GFP_KERNEL);
	if (!rxq_bitmap) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = netdev_nl_read_rxq_bitmap(info, netdev->real_num_rx_queues,
					rxq_bitmap);
	if (err)
		goto err_rxq_bitmap;

	dma_dev = netdev_nl_get_dma_dev(netdev, rxq_bitmap, info->extack);
	if (IS_ERR(dma_dev)) {
		err = PTR_ERR(dma_dev);
		goto err_rxq_bitmap;
	}

	binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_FROM_DEVICE,
					 dmabuf_fd, priv, info->extack);
	if (IS_ERR(binding)) {
		err = PTR_ERR(binding);
		goto err_rxq_bitmap;
	}

	for_each_set_bit(rxq_idx, rxq_bitmap, netdev->real_num_rx_queues) {
		err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
						      info->extack);
		if (err)
			goto err_unbind;
	}

	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
	genlmsg_end(rsp, hdr);

	err = genlmsg_reply(rsp, info);
	if (err)
		goto err_unbind;

	bitmap_free(rxq_bitmap);

	netdev_unlock(netdev);

	mutex_unlock(&priv->lock);

	return 0;

err_unbind:
	net_devmem_unbind_dmabuf(binding);
err_rxq_bitmap:
	bitmap_free(rxq_bitmap);
err_unlock:
	netdev_unlock(netdev);
err_unlock_sock:
	mutex_unlock(&priv->lock);
err_genlmsg_free:
	nlmsg_free(rsp);
	return err;
}

int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_nl_sock *priv;
	struct net_device *netdev;
	struct device *dma_dev;
	u32 ifindex, dmabuf_fd;
	struct sk_buff *rsp;
	int err = 0;
	void *hdr;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

	priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_genlmsg_free;
	}

	mutex_lock(&priv->lock);

	netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
	if (!netdev) {
		err = -ENODEV;
		goto err_unlock_sock;
	}

	if (!netif_device_present(netdev)) {
		err = -ENODEV;
		goto err_unlock_netdev;
	}

	if (!netdev->netmem_tx) {
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(info->extack,
			       "Driver does not support netmem TX");
		goto err_unlock_netdev;
	}

	dma_dev = netdev_queue_get_dma_dev(netdev, 0);
	binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_TO_DEVICE,
					 dmabuf_fd, priv, info->extack);
	if (IS_ERR(binding)) {
		err = PTR_ERR(binding);
		goto err_unlock_netdev;
	}

	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
	genlmsg_end(rsp, hdr);

	netdev_unlock(netdev);
	mutex_unlock(&priv->lock);

	return genlmsg_reply(rsp, info);

err_unlock_netdev:
	netdev_unlock(netdev);
err_unlock_sock:
	mutex_unlock(&priv->lock);
err_genlmsg_free:
	nlmsg_free(rsp);
	return err;
}

void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv)
{
	INIT_LIST_HEAD(&priv->bindings);
	mutex_init(&priv->lock);
}

void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv)
{
	struct net_devmem_dmabuf_binding *binding;
	struct net_devmem_dmabuf_binding *temp;
	netdevice_tracker dev_tracker;
	struct net_device *dev;

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(binding, temp, &priv->bindings, list) {
		mutex_lock(&binding->lock);
		dev = binding->dev;
		if (!dev) {
			mutex_unlock(&binding->lock);
			net_devmem_unbind_dmabuf(binding);
			continue;
		}
		netdev_hold(dev, &dev_tracker, GFP_KERNEL);
		mutex_unlock(&binding->lock);

		netdev_lock(dev);
		net_devmem_unbind_dmabuf(binding);
		netdev_unlock(dev);
		netdev_put(dev, &dev_tracker);
	}
	mutex_unlock(&priv->lock);
}

static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_lock_ops_to_full(netdev);
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		netdev_unlock_full_to_ops(netdev);
		break;
	case NETDEV_UNREGISTER:
		netdev_lock(netdev);
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		netdev_unlock(netdev);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call = netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);