// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS |		\
			       NHA_OP_FLAG_DUMP_HW_STATS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_policy_del[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]	= { .type = NLA_U32 },
	[NHA_GROUPS]	= { .type = NLA_FLAG },
	[NHA_MASTER]	= { .type = NLA_U32 },
	[NHA_FDB]	= { .type = NLA_FLAG },
	[NHA_OP_FLAGS]	= NLA_POLICY_MASK(NLA_U32,
					  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};

static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->id = nhi->nh_parent->id;
	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}

static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;
	info->nh_grp->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;
	info->nh_res_table->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}

static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}
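
/* For illustration only, a capable listener might use the value computed
 * above roughly as follows (hypothetical driver-side sketch;
 * "poll_interval_ms" is an assumed driver parameter, not defined here):
 *
 *	if (!info->nh_res_bucket->force &&
 *	    info->nh_res_bucket->idle_timer_ms >= poll_interval_ms)
 *		...attempt an atomic (only-if-inactive) replacement...
 *	else
 *		...perform a non-atomic replacement...
 */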

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))
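
/* For illustration, the RTNL-side write sequence described above maps onto
 * helpers defined later in this file (see e.g. replace_nexthop_grp_res()):
 *
 *	nh_res_table_cancel_upkeep(res_table);	- synchronously cancel the DW
 *	...migrate/rebalance buckets...		- do the writing
 *	nh_res_table_upkeep(res_table, ...);	- reschedule the DW if needed
 *
 * This is a sketch of the scheme, not a callable helper.
 */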

static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nh->id,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		free_percpu(nhge->stats);
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
		spin_lock_init(&nh->lock);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}

static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *cpu_stats;

	cpu_stats = get_cpu_ptr(nhge->stats);
	u64_stats_update_begin(&cpu_stats->syncp);
	u64_stats_inc(&cpu_stats->packets);
	u64_stats_update_end(&cpu_stats->syncp);
	put_cpu_ptr(cpu_stats);
}

static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
				    u64 *ret_packets)
{
	int i;

	*ret_packets = 0;

	for_each_possible_cpu(i) {
		struct nh_grp_entry_stats *cpu_stats;
		unsigned int start;
		u64 packets;

		cpu_stats = per_cpu_ptr(nhge->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			packets = u64_stats_read(&cpu_stats->packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		*ret_packets += packets;
	}
}

static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
					 const struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	info->id = nh->id;
	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
	info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
						    stats, nhg->num_nh),
					GFP_KERNEL);
	if (!info->nh_grp_hw_stats)
		return -ENOMEM;

	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
	}

	return 0;
}

static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_grp_hw_stats);
}

void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
				  unsigned int nh_idx,
				  u64 delta_packets)
{
	info->hw_stats_used = true;
	info->stats[nh_idx].packets += delta_packets;
}
EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);

static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
					 struct nh_notifier_info *info)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
	}
}

static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
{
	struct nh_notifier_info info = {
		.net = nh->net,
	};
	struct net *net = nh->net;
	int err;

	if (nexthop_notifiers_is_empty(net)) {
		*hw_stats_used = false;
		return 0;
	}

	err = nh_notifier_grp_hw_stats_init(&info, nh);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
					   &info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	nh_grp_hw_stats_apply_update(nh, &info);
	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;

	nh_notifier_grp_hw_stats_fini(&info);
	return notifier_to_errno(err);
}

static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
					struct nh_grp_entry *nhge,
					u32 op_flags)
{
	struct nlattr *nest;
	u64 packets;

	nh_grp_entry_stats_read(nhge, &packets);

	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
			 packets + nhge->packets_hw))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
			 nhge->packets_hw))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
				  u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nlattr *nest;
	bool hw_stats_used;
	int err;
	int i;

	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
		goto err_out;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nhg->hw_stats) {
		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
		if (err)
			goto out;

		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
			goto err_out;
	}

	nest = nla_nest_start(skb, NHA_GROUP_STATS);
	if (!nest)
		goto err_out;

	for (i = 0; i < nhg->num_nh; i++)
		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
						 op_flags))
			goto cancel_out;

	nla_nest_end(skb, nest);
	return 0;

cancel_out:
	nla_nest_cancel(skb, nest);
err_out:
	err = -EMSGSIZE;
out:
	return err;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
			    u32 op_flags, u32 *resp_op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	u16 weight;
	int i;

	*resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		weight = nhg->nh_entries[i].weight - 1;

		*p++ = (struct nexthop_grp) {
			.id = nhg->nh_entries[i].nh->id,
			.weight = weight,
			.weight_high = weight >> 8,
		};
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
	     nla_put_nh_group_stats(skb, nh, op_flags)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
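
/* Worked example of the weight encoding used above (derived from the code
 * in this file; the concrete numbers are only illustrative). The
 * user-visible weight W is stored as W - 1, split across two u8 fields:
 *
 *	W		.weight		.weight_high
 *	1		0x00		0x00
 *	256		0xff		0x00
 *	257		0x00		0x01
 *	65535		0xfe		0xff
 *
 * The receive path (nexthop_grp_weight()) reassembles the pair as
 * ((weight_high << 8) | weight) + 1. An encoded 0xffff would stand for
 * W = 0x10000, which overflows u16 and is rejected by
 * nh_check_attr_group().
 */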

static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags,
			u32 op_flags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
		u32 resp_op_flags = 0;

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
		    nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh) +
		      nla_total_size(4) +	/* NHA_OP_FLAGS */
		      0;
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle point is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}

static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	struct nexthop_grp *nhg;
	unsigned int i, j;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved field in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nexthop_grp_weight(&nhg[i]) == 0) {
			/* 0xffff got passed in, representing weight of 0x10000,
			 * which is too heavy.
			 */
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		switch (i) {
		case NHA_HW_STATS_ENABLE:
		case NHA_FDB:
			continue;
		case NHA_RES_GROUP:
			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
				continue;
			break;
		}
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group_rtnl(struct net *net, struct nlattr *tb[],
				    struct netlink_ext_ack *extack)
{
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int len;
	unsigned int i;
	u8 nhg_fdb;

	len = nla_len(tb[NHA_GROUP]) / sizeof(*nhg);
	nhg = nla_data(tb[NHA_GROUP]);
	nhg_fdb = !!tb[NHA_FDB];

	for (i = 0; i < len; i++) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}

	return 0;
}

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}

static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nh_grp_entry *nhge0 = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* nexthop groups always check whether a nexthop is good and
		 * do not rely on a sysctl for this behavior
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!nhge0)
			nhge0 = nhge;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	if (!nhge0)
		nhge0 = &nhg->nh_entries[0];
	nh_grp_entry_stats_inc(nhge0);
	return nhge0->nh;
}

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	nh_grp_entry_stats_inc(nhge);
	return nhge->nh;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);
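
/* Usage sketch (hypothetical caller, for illustration only): the forwarding
 * paths compute a flow hash and hand it to nexthop_select_path() under the
 * same RCU read-side section that produced nh, e.g.
 *
 *	hash = ...multipath hash of the flow...;
 *	sel = nexthop_select_path(nh, hash);
 *	...use sel before leaving the RCU section...
 */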

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}

static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}

static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}

static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * the idle timer or the unbalanced timer expired.
	 */

	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
	if (time_after_eq(now, idle_point)) {
		/* The bucket is idle. We _can_ migrate: unset force. */
		*force = false;
		return true;
	}

	/* Unbalanced timer of 0 means "never force". */
	if (res_table->unbalanced_timer) {
		unsigned long unb_point;

		unb_point = nh_res_table_unb_point(res_table);
		if (time_after(now, unb_point)) {
			/* The bucket is not idle, but the unbalanced timer
			 * expired. We _can_ migrate, but set force anyway,
			 * so that drivers know to ignore activity reports
			 * from the HW.
			 */
			*force = true;
			return true;
		}

		nh_res_time_set_deadline(unb_point, deadline);
	}

	nh_res_time_set_deadline(idle_point, deadline);
	return false;
}
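
/* Informal summary of the decision above (this restates the code, for
 * illustration):
 *
 *	bucket not occupied			-> migrate, force
 *	occupied by non-overweight nexthop	-> keep
 *	occupied, overweight, idle		-> migrate, don't force
 *	occupied, overweight, busy, and the
 *	unbalanced timer expired		-> migrate, force
 *	otherwise				-> keep, refine *deadline
 */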

static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
				  u16 bucket_index, bool notify,
				  bool notify_nl, bool force)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *new_nhge;
	struct netlink_ext_ack extack;
	int err;

	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
					    struct nh_grp_entry,
					    res.uw_nh_entry);
	if (WARN_ON_ONCE(!new_nhge))
		/* If this function is called, "bucket" is either not
		 * occupied, or it belongs to a next hop that is
		 * overweight. In either case, there ought to be a
		 * corresponding underweight next hop.
		 */
		return false;

	if (notify) {
		struct nh_grp_entry *old_nhge;

		old_nhge = nh_res_dereference(bucket->nh_entry);
		err = call_nexthop_res_bucket_notifiers(res_table->net,
							res_table->nhg_id,
							bucket_index, force,
							old_nhge->nh,
							new_nhge->nh, &extack);
		if (err) {
			pr_err_ratelimited("%s\n", extack._msg);
			if (!force)
				return false;
			/* It is not possible to veto a forced replacement, so
			 * just clear the hardware flags from the nexthop
			 * bucket to indicate to user space that this bucket is
			 * not correctly populated in hardware.
			 */
			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
		}
	}

	nh_res_bucket_set_nh(bucket, new_nhge);
	nh_res_bucket_set_idle(res_table, bucket);

	if (notify_nl)
		nexthop_bucket_notify(res_table, bucket_index);

	if (nh_res_nhge_is_balanced(new_nhge))
		list_del(&new_nhge->res.uw_nh_entry);
	return true;
}

#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)

static void nh_res_table_upkeep(struct nh_res_table *res_table,
				bool notify, bool notify_nl)
{
	unsigned long now = jiffies;
	unsigned long deadline;
	u16 i;

	/* Deadline is the next time that upkeep should be run. It is the
	 * earliest time at which one of the buckets might be migrated.
	 * Start at the most pessimistic estimate: either unbalanced_timer
	 * from now, or if there is none, idle_timer from now. For each
	 * encountered time point, call nh_res_time_set_deadline() to
	 * refine the estimate.
	 */
	if (res_table->unbalanced_timer)
		deadline = now + res_table->unbalanced_timer;
	else
		deadline = now + res_table->idle_timer;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		bool force;

		if (nh_res_bucket_should_migrate(res_table, bucket,
						 &deadline, &force)) {
			if (!nh_res_bucket_migrate(res_table, i, notify,
						   notify_nl, force)) {
				unsigned long idle_point;

				/* A driver can override the migration
				 * decision if the HW reports that the
				 * bucket is actually not idle. Therefore
				 * remark the bucket as busy again and
				 * update the deadline.
				 */
				nh_res_bucket_set_busy(bucket);
				idle_point = nh_res_bucket_idle_point(res_table,
								      bucket,
								      now);
				nh_res_time_set_deadline(idle_point, &deadline);
			}
		}
	}

	/* If the group is still unbalanced, schedule the next upkeep to
	 * either the deadline computed above, or the minimum deadline,
	 * whichever comes later.
	 */
	if (!nh_res_table_is_balanced(res_table)) {
		unsigned long now = jiffies;
		unsigned long min_deadline;

		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
		if (time_before(deadline, min_deadline))
			deadline = min_deadline;

		queue_delayed_work(system_power_efficient_wq,
				   &res_table->upkeep_dw, deadline - now);
	}
}

static void nh_res_table_upkeep_dw(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct nh_res_table *res_table;

	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
	nh_res_table_upkeep(res_table, true, true);
}

static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
{
	cancel_delayed_work_sync(&res_table->upkeep_dw);
}

static void nh_res_group_rebalance(struct nh_group *nhg,
				   struct nh_res_table *res_table)
{
	u16 prev_upper_bound = 0;
	u32 total = 0;
	u32 w = 0;
	int i;

	INIT_LIST_HEAD(&res_table->uw_nh_entries);

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		u16 upper_bound;
		u64 btw;

		w += nhge->weight;
		btw = ((u64)res_table->num_nh_buckets) * w;
		upper_bound = DIV_ROUND_CLOSEST_ULL(btw, total);
		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
		prev_upper_bound = upper_bound;

		if (nh_res_nhge_is_uw(nhge)) {
			if (list_empty(&res_table->uw_nh_entries))
				res_table->unbalanced_since = jiffies;
			list_add(&nhge->res.uw_nh_entry,
				 &res_table->uw_nh_entries);
		}
	}
}
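
/* Worked example of the rebalance above (illustrative numbers): with
 * num_nh_buckets = 8 and two entries of weights 1 and 3, total = 4. The
 * running upper bounds are DIV_ROUND_CLOSEST_ULL(8 * 1, 4) = 2 and
 * DIV_ROUND_CLOSEST_ULL(8 * 4, 4) = 8, so wants_buckets ends up as 2 and
 * 6. Entries that currently hold fewer buckets than they want are put on
 * uw_nh_entries and are fed buckets by the upkeep logic above.
 */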
1936
1937/* Migrate buckets in res_table so that they reference NHGE's from NHG with
1938 * the right NH ID. Set those buckets that do not have a corresponding NHGE
1939 * entry in NHG as not occupied.
1940 */
1941static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1942 struct nh_group *nhg)
1943{
1944 u16 i;
1945
1946 for (i = 0; i < res_table->num_nh_buckets; i++) {
1947 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1948 u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1949 bool found = false;
1950 int j;
1951
1952 for (j = 0; j < nhg->num_nh; j++) {
1953 struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1954
1955 if (nhge->nh->id == id) {
1956 nh_res_bucket_set_nh(bucket, nhge);
1957 found = true;
1958 break;
1959 }
1960 }
1961
1962 if (!found)
1963 nh_res_bucket_unset_nh(bucket);
1964 }
1965}
1966
1967static void replace_nexthop_grp_res(struct nh_group *oldg,
1968 struct nh_group *newg)
1969{
1970 /* For NH group replacement, the new NHG might only have a stub
1971 * hash table with 0 buckets, because the number of buckets was not
1972 * specified. For NH removal, oldg and newg both reference the same
1973 * res_table. So in any case, in the following, we want to work
1974 * with oldg->res_table.
1975 */
1976 struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1977 unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1978 bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1979
1980 nh_res_table_cancel_upkeep(old_res_table);
1981 nh_res_table_migrate_buckets(old_res_table, newg);
1982 nh_res_group_rebalance(newg, old_res_table);
1983 if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1984 old_res_table->unbalanced_since = prev_unbalanced_since;
1985 nh_res_table_upkeep(old_res_table, true, false);
1986}
1987
1988static void nh_hthr_group_rebalance(struct nh_group *nhg)
1989{
1990 u32 total = 0;
1991 u32 w = 0;
1992 int i;
1993
1994 for (i = 0; i < nhg->num_nh; ++i)
1995 total += nhg->nh_entries[i].weight;
1996
1997 for (i = 0; i < nhg->num_nh; ++i) {
1998 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1999 u32 upper_bound;
2000
2001 w += nhge->weight;
2002 upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
2003 atomic_set(&nhge->hthr.upper_bound, upper_bound);
2004 }
2005}
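
/* Worked example (illustrative only): for two entries weighted 3 and 1,
 * total = 4, so the upper bounds are (3 << 31) / 4 - 1 = 0x5fffffff and
 * (4 << 31) / 4 - 1 = 0x7fffffff. A flow hash in [0, 0x5fffffff] selects
 * entry 0, and the remaining quarter of the 31-bit hash space selects
 * entry 1, giving the configured 3:1 split.
 */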
2006
2007static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
2008 struct nl_info *nlinfo)
2009{
2010 struct nh_grp_entry *nhges, *new_nhges;
2011 struct nexthop *nhp = nhge->nh_parent;
2012 struct netlink_ext_ack extack;
2013 struct nexthop *nh = nhge->nh;
2014 struct nh_group *nhg, *newg;
2015 int i, j, err;
2016
2017 WARN_ON(!nh);
2018
2019 nhg = rtnl_dereference(nhp->nh_grp);
2020 newg = nhg->spare;
2021
2022 /* last remaining entry: keep it visible and remove the parent group instead */
2023 if (nhg->num_nh == 1) {
2024 remove_nexthop(net, nhp, nlinfo);
2025 return;
2026 }
2027
2028 newg->has_v4 = false;
2029 newg->is_multipath = nhg->is_multipath;
2030 newg->hash_threshold = nhg->hash_threshold;
2031 newg->resilient = nhg->resilient;
2032 newg->fdb_nh = nhg->fdb_nh;
2033 newg->num_nh = nhg->num_nh;
2034
2035 /* copy old entries to new except the one getting removed */
2036 nhges = nhg->nh_entries;
2037 new_nhges = newg->nh_entries;
2038 for (i = 0, j = 0; i < nhg->num_nh; ++i) {
2039 struct nh_info *nhi;
2040
2041 /* current nexthop getting removed */
2042 if (nhg->nh_entries[i].nh == nh) {
2043 newg->num_nh--;
2044 continue;
2045 }
2046
2047 nhi = rtnl_dereference(nhges[i].nh->nh_info);
2048 if (nhi->family == AF_INET)
2049 newg->has_v4 = true;
2050
2051 list_del(&nhges[i].nh_list);
2052 new_nhges[j].stats = nhges[i].stats;
2053 new_nhges[j].nh_parent = nhges[i].nh_parent;
2054 new_nhges[j].nh = nhges[i].nh;
2055 new_nhges[j].weight = nhges[i].weight;
2056 list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
2057 j++;
2058 }
2059
2060 if (newg->hash_threshold)
2061 nh_hthr_group_rebalance(newg);
2062 else if (newg->resilient)
2063 replace_nexthop_grp_res(nhg, newg);
2064
2065 rcu_assign_pointer(nhp->nh_grp, newg);
2066
2067 list_del(&nhge->nh_list);
2068 free_percpu(nhge->stats);
2069 nexthop_put(nhge->nh);
2070
2071 /* Removal of a NH from a resilient group is notified through
2072 * bucket notifications.
2073 */
2074 if (newg->hash_threshold) {
2075 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
2076 &extack);
2077 if (err)
2078 pr_err("%s\n", extack._msg);
2079 }
2080
2081 if (nlinfo)
2082 nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
2083}
2084
2085static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
2086 struct nl_info *nlinfo)
2087{
2088 struct nh_grp_entry *nhge, *tmp;
2089
2090 /* If there is nothing to do, let's avoid the costly call to
2091 * synchronize_net()
2092 */
2093 if (list_empty(&nh->grp_list))
2094 return;
2095
2096 list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
2097 remove_nh_grp_entry(net, nhge, nlinfo);
2098
2099 /* make sure all see the newly published array before releasing rtnl */
2100 synchronize_net();
2101}
2102
2103static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
2104{
2105 struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
2106 struct nh_res_table *res_table;
2107 int i, num_nh = nhg->num_nh;
2108
2109 for (i = 0; i < num_nh; ++i) {
2110 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2111
2112 if (WARN_ON(!nhge->nh))
2113 continue;
2114
2115 list_del_init(&nhge->nh_list);
2116 }
2117
2118 if (nhg->resilient) {
2119 res_table = rtnl_dereference(nhg->res_table);
2120 nh_res_table_cancel_upkeep(res_table);
2121 }
2122}
2123
2124/* not called for nexthop replace */
2125static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
2126{
2127 struct fib6_info *f6i;
2128 bool do_flush = false;
2129 struct fib_info *fi;
2130
2131 list_for_each_entry(fi, &nh->fi_list, nh_list) {
2132 fi->fib_flags |= RTNH_F_DEAD;
2133 do_flush = true;
2134 }
2135 if (do_flush)
2136 fib_flush(net);
2137
2138 spin_lock_bh(&nh->lock);
2139
2140 nh->dead = true;
2141
2142 while (!list_empty(&nh->f6i_list)) {
2143 f6i = list_first_entry(&nh->f6i_list, typeof(*f6i), nh_list);
2144
2145 /* __ip6_del_rt does a release, so do a hold here */
2146 fib6_info_hold(f6i);
2147
2148 spin_unlock_bh(&nh->lock);
2149 ipv6_stub->ip6_del_rt(net, f6i,
2150 !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
2151
2152 spin_lock_bh(&nh->lock);
2153 }
2154
2155 spin_unlock_bh(&nh->lock);
2156}
2157
2158static void __remove_nexthop(struct net *net, struct nexthop *nh,
2159 struct nl_info *nlinfo)
2160{
2161 __remove_nexthop_fib(net, nh);
2162
2163 if (nh->is_group) {
2164 remove_nexthop_group(nh, nlinfo);
2165 } else {
2166 struct nh_info *nhi;
2167
2168 nhi = rtnl_dereference(nh->nh_info);
2169 if (nhi->fib_nhc.nhc_dev)
2170 hlist_del(&nhi->dev_hash);
2171
2172 remove_nexthop_from_groups(net, nh, nlinfo);
2173 }
2174}
2175
2176static void remove_nexthop(struct net *net, struct nexthop *nh,
2177 struct nl_info *nlinfo)
2178{
2179 call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
2180
2181 /* remove from the tree */
2182 rb_erase(&nh->rb_node, &net->nexthop.rb_root);
2183
2184 if (nlinfo)
2185 nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
2186
2187 __remove_nexthop(net, nh, nlinfo);
2188 nh_base_seq_inc(net);
2189
2190 nexthop_put(nh);
2191}
2192
2193/* if any FIB entries reference this nexthop, any dst entries
2194 * need to be regenerated
2195 */
2196static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
2197 struct nexthop *replaced_nh)
2198{
2199 struct fib6_info *f6i;
2200 struct nh_group *nhg;
2201 int i;
2202
2203 if (!list_empty(&nh->fi_list))
2204 rt_cache_flush(net);
2205
2206 list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2207 ipv6_stub->fib6_update_sernum(net, f6i);
2208
2209 /* if an IPv6 group was replaced, we have to release all old
2210 * dsts to make sure all refcounts are released
2211 */
2212 if (!replaced_nh->is_group)
2213 return;
2214
2215 nhg = rtnl_dereference(replaced_nh->nh_grp);
2216 for (i = 0; i < nhg->num_nh; i++) {
2217 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2218 struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
2219
2220 if (nhi->family == AF_INET6)
2221 ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
2222 }
2223}
2224
2225static int replace_nexthop_grp(struct net *net, struct nexthop *old,
2226 struct nexthop *new, const struct nh_config *cfg,
2227 struct netlink_ext_ack *extack)
2228{
2229 struct nh_res_table *tmp_table = NULL;
2230 struct nh_res_table *new_res_table;
2231 struct nh_res_table *old_res_table;
2232 struct nh_group *oldg, *newg;
2233 int i, err;
2234
2235 if (!new->is_group) {
2236 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
2237 return -EINVAL;
2238 }
2239
2240 oldg = rtnl_dereference(old->nh_grp);
2241 newg = rtnl_dereference(new->nh_grp);
2242
2243 if (newg->hash_threshold != oldg->hash_threshold) {
2244 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
2245 return -EINVAL;
2246 }
2247
2248 if (newg->hash_threshold) {
2249 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
2250 extack);
2251 if (err)
2252 return err;
2253 } else if (newg->resilient) {
2254 new_res_table = rtnl_dereference(newg->res_table);
2255 old_res_table = rtnl_dereference(oldg->res_table);
2256
2257 /* Accept if num_nh_buckets was not given, but if it was
2258 * given, demand that the value be correct.
2259 */
2260 if (cfg->nh_grp_res_has_num_buckets &&
2261 cfg->nh_grp_res_num_buckets !=
2262 old_res_table->num_nh_buckets) {
2263 NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
2264 return -EINVAL;
2265 }
2266
2267 /* Emit a pre-replace notification so that listeners could veto
2268 * a potentially unsupported configuration. Otherwise,
2269 * individual bucket replacement notifications would need to be
2270 * vetoed, which is something that should only happen if the
2271 * bucket is currently active.
2272 */
2273 err = call_nexthop_res_table_notifiers(net, new, extack);
2274 if (err)
2275 return err;
2276
2277 if (cfg->nh_grp_res_has_idle_timer)
2278 old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
2279 if (cfg->nh_grp_res_has_unbalanced_timer)
2280 old_res_table->unbalanced_timer =
2281 cfg->nh_grp_res_unbalanced_timer;
2282
2283 replace_nexthop_grp_res(oldg, newg);
2284
2285 tmp_table = new_res_table;
2286 rcu_assign_pointer(newg->res_table, old_res_table);
2287 rcu_assign_pointer(newg->spare->res_table, old_res_table);
2288 }
2289
2290 /* update parents - used by nexthop code for cleanup */
2291 for (i = 0; i < newg->num_nh; i++)
2292 newg->nh_entries[i].nh_parent = old;
2293
2294 rcu_assign_pointer(old->nh_grp, newg);
2295
2296 /* Make sure concurrent readers are not using 'oldg' anymore. */
2297 synchronize_net();
2298
2299 if (newg->resilient) {
2300 rcu_assign_pointer(oldg->res_table, tmp_table);
2301 rcu_assign_pointer(oldg->spare->res_table, tmp_table);
2302 }
2303
2304 for (i = 0; i < oldg->num_nh; i++)
2305 oldg->nh_entries[i].nh_parent = new;
2306
2307 rcu_assign_pointer(new->nh_grp, oldg);
2308
2309 return 0;
2310}
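
/* Net effect of the resilient branch above, sketched (illustrative):
 *
 *	before:	old->nh_grp -> oldg, oldg->res_table -> A (live buckets)
 *		new->nh_grp -> newg, newg->res_table -> B (0-bucket stub)
 *	after:	old->nh_grp -> newg, newg->res_table -> A
 *		new->nh_grp -> oldg, oldg->res_table -> B
 *
 * The live bucket table always stays with the nexthop that remains in the
 * tree; the stub is freed together with 'new'.
 */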
2311
2312static void nh_group_v4_update(struct nh_group *nhg)
2313{
2314 struct nh_grp_entry *nhges;
2315 bool has_v4 = false;
2316 int i;
2317
2318 nhges = nhg->nh_entries;
2319 for (i = 0; i < nhg->num_nh; i++) {
2320 struct nh_info *nhi;
2321
2322 nhi = rtnl_dereference(nhges[i].nh->nh_info);
2323 if (nhi->family == AF_INET)
2324 has_v4 = true;
2325 }
2326 nhg->has_v4 = has_v4;
2327}
2328
2329static int replace_nexthop_single_notify_res(struct net *net,
2330 struct nh_res_table *res_table,
2331 struct nexthop *old,
2332 struct nh_info *oldi,
2333 struct nh_info *newi,
2334 struct netlink_ext_ack *extack)
2335{
2336 u32 nhg_id = res_table->nhg_id;
2337 int err;
2338 u16 i;
2339
2340 for (i = 0; i < res_table->num_nh_buckets; i++) {
2341 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2342 struct nh_grp_entry *nhge;
2343
2344 nhge = rtnl_dereference(bucket->nh_entry);
2345 if (nhge->nh == old) {
2346 err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
2347 i, true,
2348 oldi, newi,
2349 extack);
2350 if (err)
2351 goto err_notify;
2352 }
2353 }
2354
2355 return 0;
2356
2357err_notify:
2358 while (i-- > 0) {
2359 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2360 struct nh_grp_entry *nhge;
2361
2362 nhge = rtnl_dereference(bucket->nh_entry);
2363 if (nhge->nh == old)
2364 __call_nexthop_res_bucket_notifiers(net, nhg_id, i,
2365 true, newi, oldi,
2366 extack);
2367 }
2368 return err;
2369}
2370
2371static int replace_nexthop_single_notify(struct net *net,
2372 struct nexthop *group_nh,
2373 struct nexthop *old,
2374 struct nh_info *oldi,
2375 struct nh_info *newi,
2376 struct netlink_ext_ack *extack)
2377{
2378 struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
2379 struct nh_res_table *res_table;
2380
2381 if (nhg->hash_threshold) {
2382 return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
2383 group_nh, extack);
2384 } else if (nhg->resilient) {
2385 res_table = rtnl_dereference(nhg->res_table);
2386 return replace_nexthop_single_notify_res(net, res_table,
2387 old, oldi, newi,
2388 extack);
2389 }
2390
2391 return -EINVAL;
2392}
2393
2394static int replace_nexthop_single(struct net *net, struct nexthop *old,
2395 struct nexthop *new,
2396 struct netlink_ext_ack *extack)
2397{
2398 u8 old_protocol, old_nh_flags;
2399 struct nh_info *oldi, *newi;
2400 struct nh_grp_entry *nhge;
2401 int err;
2402
2403 if (new->is_group) {
2404 NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
2405 return -EINVAL;
2406 }
2407
2408 if (!list_empty(&old->grp_list) &&
2409 rtnl_dereference(new->nh_info)->fdb_nh !=
2410 rtnl_dereference(old->nh_info)->fdb_nh) {
2411 NL_SET_ERR_MSG(extack, "Cannot change nexthop FDB status while in a group");
2412 return -EINVAL;
2413 }
2414
2415 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
2416 if (err)
2417 return err;
2418
2419 /* Hardware flags were set on 'old' as 'new' is not in the red-black
2420 * tree. Therefore, inherit the flags from 'old' to 'new'.
2421 */
2422 new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
2423
2424 oldi = rtnl_dereference(old->nh_info);
2425 newi = rtnl_dereference(new->nh_info);
2426
2427 newi->nh_parent = old;
2428 oldi->nh_parent = new;
2429
2430 old_protocol = old->protocol;
2431 old_nh_flags = old->nh_flags;
2432
2433 old->protocol = new->protocol;
2434 old->nh_flags = new->nh_flags;
2435
2436 rcu_assign_pointer(old->nh_info, newi);
2437 rcu_assign_pointer(new->nh_info, oldi);
2438
2439 /* Send a replace notification for all the groups using the nexthop. */
2440 list_for_each_entry(nhge, &old->grp_list, nh_list) {
2441 struct nexthop *nhp = nhge->nh_parent;
2442
2443 err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
2444 extack);
2445 if (err)
2446 goto err_notify;
2447 }
2448
2449 /* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
2450 * update IPv4 indication in all the groups using the nexthop.
2451 */
2452 if (oldi->family == AF_INET && newi->family == AF_INET6) {
2453 list_for_each_entry(nhge, &old->grp_list, nh_list) {
2454 struct nexthop *nhp = nhge->nh_parent;
2455 struct nh_group *nhg;
2456
2457 nhg = rtnl_dereference(nhp->nh_grp);
2458 nh_group_v4_update(nhg);
2459 }
2460 }
2461
2462 return 0;
2463
2464err_notify:
2465 rcu_assign_pointer(new->nh_info, newi);
2466 rcu_assign_pointer(old->nh_info, oldi);
2467 old->nh_flags = old_nh_flags;
2468 old->protocol = old_protocol;
2469 oldi->nh_parent = old;
2470 newi->nh_parent = new;
2471 list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
2472 struct nexthop *nhp = nhge->nh_parent;
2473
2474 replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
2475 }
2476 call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
2477 return err;
2478}
2479
2480static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
2481 struct nl_info *info)
2482{
2483 struct fib6_info *f6i;
2484
2485 if (!list_empty(&nh->fi_list)) {
2486 struct fib_info *fi;
2487
2488 /* expectation is a few fib_info per nexthop and then
2489 * a lot of routes per fib_info. So mark the fib_info
2490 * and then walk the fib tables once
2491 */
2492 list_for_each_entry(fi, &nh->fi_list, nh_list)
2493 fi->nh_updated = true;
2494
2495 fib_info_notify_update(net, info);
2496
2497 list_for_each_entry(fi, &nh->fi_list, nh_list)
2498 fi->nh_updated = false;
2499 }
2500
2501 list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2502 ipv6_stub->fib6_rt_update(net, f6i, info);
2503}
2504
2505/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2506 * linked to this nexthop and for all groups that the nexthop
2507 * is a member of
2508 */
2509static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2510 struct nl_info *info)
2511{
2512 struct nh_grp_entry *nhge;
2513
2514 __nexthop_replace_notify(net, nh, info);
2515
2516 list_for_each_entry(nhge, &nh->grp_list, nh_list)
2517 __nexthop_replace_notify(net, nhge->nh_parent, info);
2518}
2519
2520static int replace_nexthop(struct net *net, struct nexthop *old,
2521 struct nexthop *new, const struct nh_config *cfg,
2522 struct netlink_ext_ack *extack)
2523{
2524 bool new_is_reject = false;
2525 struct nh_grp_entry *nhge;
2526 int err;
2527
2528 /* check that existing FIB entries are ok with the
2529 * new nexthop definition
2530 */
2531 err = fib_check_nh_list(old, new, extack);
2532 if (err)
2533 return err;
2534
2535 err = fib6_check_nh_list(old, new, extack);
2536 if (err)
2537 return err;
2538
2539 if (!new->is_group) {
2540 struct nh_info *nhi = rtnl_dereference(new->nh_info);
2541
2542 new_is_reject = nhi->reject_nh;
2543 }
2544
2545 list_for_each_entry(nhge, &old->grp_list, nh_list) {
2546 /* if new nexthop is a blackhole, any groups using this
2547 * nexthop cannot have more than 1 path
2548 */
2549 if (new_is_reject &&
2550 nexthop_num_path(nhge->nh_parent) > 1) {
2551 NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2552 return -EINVAL;
2553 }
2554
2555 err = fib_check_nh_list(nhge->nh_parent, new, extack);
2556 if (err)
2557 return err;
2558
2559 err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2560 if (err)
2561 return err;
2562 }
2563
2564 if (old->is_group)
2565 err = replace_nexthop_grp(net, old, new, cfg, extack);
2566 else
2567 err = replace_nexthop_single(net, old, new, extack);
2568
2569 if (!err) {
2570 nh_rt_cache_flush(net, old, new);
2571
2572 __remove_nexthop(net, new, NULL);
2573 nexthop_put(new);
2574 }
2575
2576 return err;
2577}
2578
2579/* called with rtnl_lock held */
2580static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2581 struct nh_config *cfg, struct netlink_ext_ack *extack)
2582{
2583 struct rb_node **pp, *parent = NULL, *next;
2584 struct rb_root *root = &net->nexthop.rb_root;
2585 bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2586 bool create = !!(cfg->nlflags & NLM_F_CREATE);
2587 u32 new_id = new_nh->id;
2588 int replace_notify = 0;
2589 int rc = -EEXIST;
2590
2591 pp = &root->rb_node;
2592 while (1) {
2593 struct nexthop *nh;
2594
2595 next = *pp;
2596 if (!next)
2597 break;
2598
2599 parent = next;
2600
2601 nh = rb_entry(parent, struct nexthop, rb_node);
2602 if (new_id < nh->id) {
2603 pp = &next->rb_left;
2604 } else if (new_id > nh->id) {
2605 pp = &next->rb_right;
2606 } else if (replace) {
2607 rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2608 if (!rc) {
2609 new_nh = nh; /* send notification with old nh */
2610 replace_notify = 1;
2611 }
2612 goto out;
2613 } else {
2614 /* id already exists and not a replace */
2615 goto out;
2616 }
2617 }
2618
2619 if (replace && !create) {
2620 NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2621 rc = -ENOENT;
2622 goto out;
2623 }
2624
2625 if (new_nh->is_group) {
2626 struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2627 struct nh_res_table *res_table;
2628
2629 if (nhg->resilient) {
2630 res_table = rtnl_dereference(nhg->res_table);
2631
2632 /* Not passing the number of buckets is OK when
2633 * replacing, but not when creating a new group.
2634 */
2635 if (!cfg->nh_grp_res_has_num_buckets) {
2636 NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2637 rc = -EINVAL;
2638 goto out;
2639 }
2640
2641 nh_res_group_rebalance(nhg, res_table);
2642
2643 /* Do not send bucket notifications; a full group
2644 * notification is sent below.
2645 */
2646 nh_res_table_upkeep(res_table, false, false);
2647 }
2648 }
2649
2650 rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2651 rb_insert_color(&new_nh->rb_node, root);
2652
2653 /* The initial insertion is a full notification for hash-threshold as
2654 * well as resilient groups.
2655 */
2656 rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2657 if (rc)
2658 rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2659
2660out:
2661 if (!rc) {
2662 nh_base_seq_inc(net);
2663 nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2664 if (replace_notify &&
2665 READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
2666 nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2667 }
2668
2669 return rc;
2670}
2671
2672/* rtnl */
2673/* remove all nexthops tied to a device being deleted */
2674static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2675{
2676 unsigned int hash = nh_dev_hashfn(dev->ifindex);
2677 struct net *net = dev_net(dev);
2678 struct hlist_head *head = &net->nexthop.devhash[hash];
2679 struct hlist_node *n;
2680 struct nh_info *nhi;
2681
2682 hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2683 if (nhi->fib_nhc.nhc_dev != dev)
2684 continue;
2685
2686 if (nhi->reject_nh &&
2687 (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2688 continue;
2689
2690 remove_nexthop(net, nhi->nh_parent, NULL);
2691 }
2692}
2693
2694/* rtnl; called when net namespace is deleted */
2695static void flush_all_nexthops(struct net *net)
2696{
2697 struct rb_root *root = &net->nexthop.rb_root;
2698 struct rb_node *node;
2699 struct nexthop *nh;
2700
2701 while ((node = rb_first(root))) {
2702 nh = rb_entry(node, struct nexthop, rb_node);
2703 remove_nexthop(net, nh, NULL);
2704 cond_resched();
2705 }
2706}
2707
2708static struct nexthop *nexthop_create_group(struct net *net,
2709 struct nh_config *cfg)
2710{
2711 struct nlattr *grps_attr = cfg->nh_grp;
2712 struct nexthop_grp *entry = nla_data(grps_attr);
2713 u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2714 struct nh_group *nhg;
2715 struct nexthop *nh;
2716 int err;
2717 int i;
2718
2719 nh = nexthop_alloc();
2720 if (!nh)
2721 return ERR_PTR(-ENOMEM);
2722
2723 nh->is_group = 1;
2724
2725 nhg = nexthop_grp_alloc(num_nh);
2726 if (!nhg) {
2727 kfree(nh);
2728 return ERR_PTR(-ENOMEM);
2729 }
2730
2731 /* spare group used for removals */
2732 nhg->spare = nexthop_grp_alloc(num_nh);
2733 if (!nhg->spare) {
2734 kfree(nhg);
2735 kfree(nh);
2736 return ERR_PTR(-ENOMEM);
2737 }
2738 nhg->spare->spare = nhg;
2739
2740 for (i = 0; i < nhg->num_nh; ++i) {
2741 struct nexthop *nhe;
2742 struct nh_info *nhi;
2743
2744 nhe = nexthop_find_by_id(net, entry[i].id);
2745 if (!nexthop_get(nhe)) {
2746 err = -ENOENT;
2747 goto out_no_nh;
2748 }
2749
2750 nhi = rtnl_dereference(nhe->nh_info);
2751 if (nhi->family == AF_INET)
2752 nhg->has_v4 = true;
2753
2754 nhg->nh_entries[i].stats =
2755 netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
2756 if (!nhg->nh_entries[i].stats) {
2757 err = -ENOMEM;
2758 nexthop_put(nhe);
2759 goto out_no_nh;
2760 }
2761 nhg->nh_entries[i].nh = nhe;
2762 nhg->nh_entries[i].weight = nexthop_grp_weight(&entry[i]);
2763
2764 list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2765 nhg->nh_entries[i].nh_parent = nh;
2766 }
2767
2768 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2769 nhg->hash_threshold = 1;
2770 nhg->is_multipath = true;
2771 } else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2772 struct nh_res_table *res_table;
2773
2774 res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2775 if (!res_table) {
2776 err = -ENOMEM;
2777 goto out_no_nh;
2778 }
2779
2780 rcu_assign_pointer(nhg->spare->res_table, res_table);
2781 rcu_assign_pointer(nhg->res_table, res_table);
2782 nhg->resilient = true;
2783 nhg->is_multipath = true;
2784 }
2785
2786 WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);
2787
2788 if (nhg->hash_threshold)
2789 nh_hthr_group_rebalance(nhg);
2790
2791 if (cfg->nh_fdb)
2792 nhg->fdb_nh = 1;
2793
2794 if (cfg->nh_hw_stats)
2795 nhg->hw_stats = true;
2796
2797 rcu_assign_pointer(nh->nh_grp, nhg);
2798
2799 return nh;
2800
2801out_no_nh:
2802 for (i--; i >= 0; --i) {
2803 list_del(&nhg->nh_entries[i].nh_list);
2804 free_percpu(nhg->nh_entries[i].stats);
2805 nexthop_put(nhg->nh_entries[i].nh);
2806 }
2807
2808 kfree(nhg->spare);
2809 kfree(nhg);
2810 kfree(nh);
2811
2812 return ERR_PTR(err);
2813}
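
/* Userspace view (illustrative): the NHA_GROUP payload parsed above is an
 * array of struct nexthop_grp, which iproute2 builds from e.g.:
 *
 *	ip nexthop add id 103 group 101,2/102,3
 *
 * i.e. a hash-threshold group over nexthops 101 and 102 with weights 2
 * and 3.
 */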
2814
2815static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2816 struct nh_info *nhi, struct nh_config *cfg,
2817 struct netlink_ext_ack *extack)
2818{
2819 struct fib_nh *fib_nh = &nhi->fib_nh;
2820 struct fib_config fib_cfg = {
2821 .fc_oif = cfg->nh_ifindex,
2822 .fc_gw4 = cfg->gw.ipv4,
2823 .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2824 .fc_flags = cfg->nh_flags,
2825 .fc_nlinfo = cfg->nlinfo,
2826 .fc_encap = cfg->nh_encap,
2827 .fc_encap_type = cfg->nh_encap_type,
2828 };
2829 u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2830 int err;
2831
2832 err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2833 if (err) {
2834 fib_nh_release(net, fib_nh);
2835 goto out;
2836 }
2837
2838 if (nhi->fdb_nh)
2839 goto out;
2840
2841 /* sets nh_dev if successful */
2842 err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2843 if (!err) {
2844 nh->nh_flags = fib_nh->fib_nh_flags;
2845 fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2846 !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
2847 } else {
2848 fib_nh_release(net, fib_nh);
2849 }
2850out:
2851 return err;
2852}
2853
2854static int nh_create_ipv6(struct net *net, struct nexthop *nh,
2855 struct nh_info *nhi, struct nh_config *cfg,
2856 struct netlink_ext_ack *extack)
2857{
2858 struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2859 struct fib6_config fib6_cfg = {
2860 .fc_table = l3mdev_fib_table(cfg->dev),
2861 .fc_ifindex = cfg->nh_ifindex,
2862 .fc_gateway = cfg->gw.ipv6,
2863 .fc_flags = cfg->nh_flags,
2864 .fc_nlinfo = cfg->nlinfo,
2865 .fc_encap = cfg->nh_encap,
2866 .fc_encap_type = cfg->nh_encap_type,
2867 .fc_is_fdb = cfg->nh_fdb,
2868 };
2869 int err;
2870
2871 if (!ipv6_addr_any(&cfg->gw.ipv6))
2872 fib6_cfg.fc_flags |= RTF_GATEWAY;
2873
2874 /* sets nh_dev if successful */
2875 err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2876 extack);
2877 if (err) {
2878 /* IPv6 is not enabled, don't call fib6_nh_release */
2879 if (err == -EAFNOSUPPORT)
2880 goto out;
2881 ipv6_stub->fib6_nh_release(fib6_nh);
2882 } else {
2883 nh->nh_flags = fib6_nh->fib_nh_flags;
2884 }
2885out:
2886 return err;
2887}
2888
2889static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2890 struct netlink_ext_ack *extack)
2891{
2892 struct nh_info *nhi;
2893 struct nexthop *nh;
2894 int err = 0;
2895
2896 nh = nexthop_alloc();
2897 if (!nh)
2898 return ERR_PTR(-ENOMEM);
2899
2900 nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2901 if (!nhi) {
2902 kfree(nh);
2903 return ERR_PTR(-ENOMEM);
2904 }
2905
2906 nh->nh_flags = cfg->nh_flags;
2907 nh->net = net;
2908
2909 nhi->nh_parent = nh;
2910 nhi->family = cfg->nh_family;
2911 nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2912
2913 if (cfg->nh_fdb)
2914 nhi->fdb_nh = 1;
2915
2916 if (cfg->nh_blackhole) {
2917 nhi->reject_nh = 1;
2918 cfg->nh_ifindex = net->loopback_dev->ifindex;
2919 }
2920
2921 switch (cfg->nh_family) {
2922 case AF_INET:
2923 err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2924 break;
2925 case AF_INET6:
2926 err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2927 break;
2928 }
2929
2930 if (err) {
2931 kfree(nhi);
2932 kfree(nh);
2933 return ERR_PTR(err);
2934 }
2935
2936 /* add the entry to the device based hash */
2937 if (!nhi->fdb_nh)
2938 nexthop_devhash_add(net, nhi);
2939
2940 rcu_assign_pointer(nh->nh_info, nhi);
2941
2942 return nh;
2943}
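
/* Userspace view (illustrative): a standalone nexthop built here maps to
 * iproute2 commands such as:
 *
 *	ip nexthop add id 101 via 192.0.2.1 dev eth0
 *	ip nexthop add id 102 blackhole
 *
 * The blackhole form takes the reject_nh path above and is internally
 * bound to the loopback device.
 */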
2944
2945/* called with rtnl lock held */
2946static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2947 struct netlink_ext_ack *extack)
2948{
2949 struct nexthop *nh;
2950 int err;
2951
2952 if (!cfg->nh_id) {
2953 cfg->nh_id = nh_find_unused_id(net);
2954 if (!cfg->nh_id) {
2955 NL_SET_ERR_MSG(extack, "No unused id");
2956 return ERR_PTR(-EINVAL);
2957 }
2958 }
2959
2960 if (cfg->nh_grp)
2961 nh = nexthop_create_group(net, cfg);
2962 else
2963 nh = nexthop_create(net, cfg, extack);
2964
2965 if (IS_ERR(nh))
2966 return nh;
2967
2968 refcount_set(&nh->refcnt, 1);
2969 nh->id = cfg->nh_id;
2970 nh->protocol = cfg->nh_protocol;
2971 nh->net = net;
2972
2973 err = insert_nexthop(net, nh, cfg, extack);
2974 if (err) {
2975 __remove_nexthop(net, nh, NULL);
2976 nexthop_put(nh);
2977 nh = ERR_PTR(err);
2978 }
2979
2980 return nh;
2981}
2982
2983static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
2984 unsigned long *timer_p, bool *has_p,
2985 struct netlink_ext_ack *extack)
2986{
2987 unsigned long timer;
2988 u32 value;
2989
2990 if (!attr) {
2991 *timer_p = fallback;
2992 *has_p = false;
2993 return 0;
2994 }
2995
2996 value = nla_get_u32(attr);
2997 timer = clock_t_to_jiffies(value);
2998 if (timer == ~0UL) {
2999 NL_SET_ERR_MSG(extack, "Timer value too large");
3000 return -EINVAL;
3001 }
3002
3003 *timer_p = timer;
3004 *has_p = true;
3005 return 0;
3006}
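
/* Worked example (illustrative): the timers arrive in clock_t units
 * (USER_HZ ticks, typically 100 per second), so an attribute value of
 * 1000 means 10 seconds and clock_t_to_jiffies() turns it into 10 * HZ
 * jiffies here.
 */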
3007
3008static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
3009 struct netlink_ext_ack *extack)
3010{
3011 struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
3012 int err;
3013
3014 if (res) {
3015 err = nla_parse_nested(tb,
3016 ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
3017 res, rtm_nh_res_policy_new, extack);
3018 if (err < 0)
3019 return err;
3020 }
3021
3022 if (tb[NHA_RES_GROUP_BUCKETS]) {
3023 cfg->nh_grp_res_num_buckets =
3024 nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
3025 cfg->nh_grp_res_has_num_buckets = true;
3026 if (!cfg->nh_grp_res_num_buckets) {
3027 NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
3028 return -EINVAL;
3029 }
3030 }
3031
3032 err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
3033 NH_RES_DEFAULT_IDLE_TIMER,
3034 &cfg->nh_grp_res_idle_timer,
3035 &cfg->nh_grp_res_has_idle_timer,
3036 extack);
3037 if (err)
3038 return err;
3039
3040 return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
3041 NH_RES_DEFAULT_UNBALANCED_TIMER,
3042 &cfg->nh_grp_res_unbalanced_timer,
3043 &cfg->nh_grp_res_has_unbalanced_timer,
3044 extack);
3045}
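
/* Userspace view (illustrative): the resilient-group attributes parsed
 * here correspond to e.g.:
 *
 *	ip nexthop add id 104 group 101/102 type resilient buckets 32
 *		idle_timer 10 unbalanced_timer 300
 *
 * Omitting "buckets" is only accepted when replacing an existing
 * resilient group; insert_nexthop() rejects that for a new one.
 */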
3046
3047static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
3048 struct nlmsghdr *nlh, struct nlattr **tb,
3049 struct nh_config *cfg,
3050 struct netlink_ext_ack *extack)
3051{
3052 struct nhmsg *nhm = nlmsg_data(nlh);
3053 int err;
3054
3055 err = -EINVAL;
3056 if (nhm->resvd || nhm->nh_scope) {
3057 NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
3058 goto out;
3059 }
3060 if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
3061 NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
3062 goto out;
3063 }
3064
3065 switch (nhm->nh_family) {
3066 case AF_INET:
3067 case AF_INET6:
3068 break;
3069 case AF_UNSPEC:
3070 if (tb[NHA_GROUP])
3071 break;
3072 fallthrough;
3073 default:
3074 NL_SET_ERR_MSG(extack, "Invalid address family");
3075 goto out;
3076 }
3077
3078 memset(cfg, 0, sizeof(*cfg));
3079 cfg->nlflags = nlh->nlmsg_flags;
3080 cfg->nlinfo.portid = NETLINK_CB(skb).portid;
3081 cfg->nlinfo.nlh = nlh;
3082 cfg->nlinfo.nl_net = net;
3083
3084 cfg->nh_family = nhm->nh_family;
3085 cfg->nh_protocol = nhm->nh_protocol;
3086 cfg->nh_flags = nhm->nh_flags;
3087
3088 if (tb[NHA_ID])
3089 cfg->nh_id = nla_get_u32(tb[NHA_ID]);
3090
3091 if (tb[NHA_FDB]) {
3092 if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
3093 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
3094 NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
3095 goto out;
3096 }
3097 if (nhm->nh_flags) {
3098 NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
3099 goto out;
3100 }
3101 cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
3102 }
3103
3104 if (tb[NHA_GROUP]) {
3105 if (nhm->nh_family != AF_UNSPEC) {
3106 NL_SET_ERR_MSG(extack, "Invalid family for group");
3107 goto out;
3108 }
3109 cfg->nh_grp = tb[NHA_GROUP];
3110
3111 cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
3112 if (tb[NHA_GROUP_TYPE])
3113 cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
3114
3115 if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
3116 NL_SET_ERR_MSG(extack, "Invalid group type");
3117 goto out;
3118 }
3119
3120 err = nh_check_attr_group(net, tb, ARRAY_SIZE(rtm_nh_policy_new),
3121 cfg->nh_grp_type, extack);
3122 if (err)
3123 goto out;
3124
3125 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
3126 err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
3127 cfg, extack);
3128
3129 if (tb[NHA_HW_STATS_ENABLE])
3130 cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
3131
3132 /* no other attributes should be set */
3133 goto out;
3134 }
3135
3136 if (tb[NHA_BLACKHOLE]) {
3137 if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
3138 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
3139 NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
3140 goto out;
3141 }
3142
3143 cfg->nh_blackhole = 1;
3144 err = 0;
3145 goto out;
3146 }
3147
3148 if (!cfg->nh_fdb && !tb[NHA_OIF]) {
3149 NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
3150 goto out;
3151 }
3152
3153 err = -EINVAL;
3154 if (tb[NHA_GATEWAY]) {
3155 struct nlattr *gwa = tb[NHA_GATEWAY];
3156
3157 switch (cfg->nh_family) {
3158 case AF_INET:
3159 if (nla_len(gwa) != sizeof(u32)) {
3160 NL_SET_ERR_MSG(extack, "Invalid gateway");
3161 goto out;
3162 }
3163 cfg->gw.ipv4 = nla_get_be32(gwa);
3164 break;
3165 case AF_INET6:
3166 if (nla_len(gwa) != sizeof(struct in6_addr)) {
3167 NL_SET_ERR_MSG(extack, "Invalid gateway");
3168 goto out;
3169 }
3170 cfg->gw.ipv6 = nla_get_in6_addr(gwa);
3171 break;
3172 default:
3173 NL_SET_ERR_MSG(extack,
3174 "Unknown address family for gateway");
3175 goto out;
3176 }
3177 } else {
3178 /* device only nexthop (no gateway) */
3179 if (cfg->nh_flags & RTNH_F_ONLINK) {
3180 NL_SET_ERR_MSG(extack,
3181 "ONLINK flag can not be set for nexthop without a gateway");
3182 goto out;
3183 }
3184 }
3185
3186 if (tb[NHA_ENCAP]) {
3187 cfg->nh_encap = tb[NHA_ENCAP];
3188
3189 if (!tb[NHA_ENCAP_TYPE]) {
3190 NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
3191 goto out;
3192 }
3193
3194 cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
3195 err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
3196 if (err < 0)
3197 goto out;
3198
3199 } else if (tb[NHA_ENCAP_TYPE]) {
3200 NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
3201 goto out;
3202 }
3203
3204 if (tb[NHA_HW_STATS_ENABLE]) {
3205 NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
3206 goto out;
3207 }
3208
3209 err = 0;
3210out:
3211 return err;
3212}
3213
3214static int rtm_to_nh_config_rtnl(struct net *net, struct nlattr **tb,
3215 struct nh_config *cfg,
3216 struct netlink_ext_ack *extack)
3217{
3218 if (tb[NHA_GROUP])
3219 return nh_check_attr_group_rtnl(net, tb, extack);
3220
3221 if (tb[NHA_OIF]) {
3222 cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
3223 if (cfg->nh_ifindex)
3224 cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
3225
3226 if (!cfg->dev) {
3227 NL_SET_ERR_MSG(extack, "Invalid device index");
3228 return -EINVAL;
3229 }
3230
3231 if (!(cfg->dev->flags & IFF_UP)) {
3232 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3233 return -ENETDOWN;
3234 }
3235
3236 if (!netif_carrier_ok(cfg->dev)) {
3237 NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
3238 return -ENETDOWN;
3239 }
3240 }
3241
3242 return 0;
3243}
3244
3245/* rtnl */
3246static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3247 struct netlink_ext_ack *extack)
3248{
3249 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
3250 struct net *net = sock_net(skb->sk);
3251 struct nh_config cfg;
3252 struct nexthop *nh;
3253 int err;
3254
3255 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3256 ARRAY_SIZE(rtm_nh_policy_new) - 1,
3257 rtm_nh_policy_new, extack);
3258 if (err < 0)
3259 goto out;
3260
3261 err = rtm_to_nh_config(net, skb, nlh, tb, &cfg, extack);
3262 if (err)
3263 goto out;
3264
3265 if (cfg.nlflags & NLM_F_REPLACE && !cfg.nh_id) {
3266 NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
3267 err = -EINVAL;
3268 goto out;
3269 }
3270
3271 rtnl_net_lock(net);
3272
3273 err = rtm_to_nh_config_rtnl(net, tb, &cfg, extack);
3274 if (err)
3275 goto unlock;
3276
3277 nh = nexthop_add(net, &cfg, extack);
3278 if (IS_ERR(nh))
3279 err = PTR_ERR(nh);
3280
3281unlock:
3282 rtnl_net_unlock(net);
3283out:
3284 return err;
3285}
3286
3287static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
3288 struct nlattr **tb, u32 *id, u32 *op_flags,
3289 struct netlink_ext_ack *extack)
3290{
3291 struct nhmsg *nhm = nlmsg_data(nlh);
3292
3293 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3294 NL_SET_ERR_MSG(extack, "Invalid values in header");
3295 return -EINVAL;
3296 }
3297
3298 if (!tb[NHA_ID]) {
3299 NL_SET_ERR_MSG(extack, "Nexthop id is missing");
3300 return -EINVAL;
3301 }
3302
3303 *id = nla_get_u32(tb[NHA_ID]);
3304 if (!(*id)) {
3305 NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3306 return -EINVAL;
3307 }
3308
3309 if (op_flags)
3310 *op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0);
3311
3312 return 0;
3313}
3314
3315/* rtnl */
3316static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3317 struct netlink_ext_ack *extack)
3318{
3319 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
3320 struct net *net = sock_net(skb->sk);
3321 struct nl_info nlinfo = {
3322 .nlh = nlh,
3323 .nl_net = net,
3324 .portid = NETLINK_CB(skb).portid,
3325 };
3326 struct nexthop *nh;
3327 int err;
3328 u32 id;
3329
3330 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3331 ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
3332 extack);
3333 if (err < 0)
3334 return err;
3335
3336 err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
3337 if (err)
3338 return err;
3339
3340 rtnl_net_lock(net);
3341
3342 nh = nexthop_find_by_id(net, id);
3343 if (nh)
3344 remove_nexthop(net, nh, &nlinfo);
3345 else
3346 err = -ENOENT;
3347
3348 rtnl_net_unlock(net);
3349
3350 return err;
3351}
3352
3353/* rtnl */
3354static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3355 struct netlink_ext_ack *extack)
3356{
3357 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
3358 struct net *net = sock_net(in_skb->sk);
3359 struct sk_buff *skb = NULL;
3360 struct nexthop *nh;
3361 u32 op_flags;
3362 int err;
3363 u32 id;
3364
3365 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3366 ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
3367 extack);
3368 if (err < 0)
3369 return err;
3370
3371 err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
3372 if (err)
3373 return err;
3374
3375 err = -ENOBUFS;
3376 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3377 if (!skb)
3378 goto out;
3379
3380 err = -ENOENT;
3381 nh = nexthop_find_by_id(net, id);
3382 if (!nh)
3383 goto errout_free;
3384
3385 err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
3386 nlh->nlmsg_seq, 0, op_flags);
3387 if (err < 0) {
3388 WARN_ON(err == -EMSGSIZE);
3389 goto errout_free;
3390 }
3391
3392 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3393out:
3394 return err;
3395errout_free:
3396 kfree_skb(skb);
3397 goto out;
3398}
3399
3400struct nh_dump_filter {
3401 u32 nh_id;
3402 int dev_idx;
3403 int master_idx;
3404 bool group_filter;
3405 bool fdb_filter;
3406 u32 res_bucket_nh_id;
3407 u32 op_flags;
3408};
3409
3410static bool nh_dump_filtered(struct nexthop *nh,
3411 struct nh_dump_filter *filter, u8 family)
3412{
3413 const struct net_device *dev;
3414 const struct nh_info *nhi;
3415
3416 if (filter->group_filter && !nh->is_group)
3417 return true;
3418
3419 if (!filter->dev_idx && !filter->master_idx && !family)
3420 return false;
3421
3422 if (nh->is_group)
3423 return true;
3424
3425 nhi = rtnl_dereference(nh->nh_info);
3426 if (family && nhi->family != family)
3427 return true;
3428
3429 dev = nhi->fib_nhc.nhc_dev;
3430 if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
3431 return true;
3432
3433 if (filter->master_idx) {
3434 struct net_device *master;
3435
3436 if (!dev)
3437 return true;
3438
3439 master = netdev_master_upper_dev_get((struct net_device *)dev);
3440 if (!master || master->ifindex != filter->master_idx)
3441 return true;
3442 }
3443
3444 return false;
3445}
3446
3447static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
3448 struct nh_dump_filter *filter,
3449 struct netlink_ext_ack *extack)
3450{
3451 struct nhmsg *nhm;
3452 u32 idx;
3453
3454 if (tb[NHA_OIF]) {
3455 idx = nla_get_u32(tb[NHA_OIF]);
3456 if (idx > INT_MAX) {
3457 NL_SET_ERR_MSG(extack, "Invalid device index");
3458 return -EINVAL;
3459 }
3460 filter->dev_idx = idx;
3461 }
3462 if (tb[NHA_MASTER]) {
3463 idx = nla_get_u32(tb[NHA_MASTER]);
3464 if (idx > INT_MAX) {
3465 NL_SET_ERR_MSG(extack, "Invalid master device index");
3466 return -EINVAL;
3467 }
3468 filter->master_idx = idx;
3469 }
3470 filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
3471 filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
3472
3473 nhm = nlmsg_data(nlh);
3474 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3475 NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
3476 return -EINVAL;
3477 }
3478
3479 return 0;
3480}
3481
3482static int nh_valid_dump_req(const struct nlmsghdr *nlh,
3483 struct nh_dump_filter *filter,
3484 struct netlink_callback *cb)
3485{
3486 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
3487 int err;
3488
3489 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3490 ARRAY_SIZE(rtm_nh_policy_dump) - 1,
3491 rtm_nh_policy_dump, cb->extack);
3492 if (err < 0)
3493 return err;
3494
3495 filter->op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0);
3496
3497 return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3498}
3499
3500struct rtm_dump_nh_ctx {
3501 u32 idx;
3502};
3503
3504static struct rtm_dump_nh_ctx *
3505rtm_dump_nh_ctx(struct netlink_callback *cb)
3506{
3507 struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
3508
3509 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3510 return ctx;
3511}
3512
3513static int rtm_dump_walk_nexthops(struct sk_buff *skb,
3514 struct netlink_callback *cb,
3515 struct rb_root *root,
3516 struct rtm_dump_nh_ctx *ctx,
3517 int (*nh_cb)(struct sk_buff *skb,
3518 struct netlink_callback *cb,
3519 struct nexthop *nh, void *data),
3520 void *data)
3521{
3522 struct rb_node *node;
3523 int s_idx;
3524 int err;
3525
3526 s_idx = ctx->idx;
3527
3528 /* If this is not the first invocation, ctx->idx will contain the id of
3529 * the last nexthop we processed. Instead of starting from the very
3530 * first element of the red/black tree again and linearly skipping the
3531 * (potentially large) set of nodes with an id smaller than s_idx, walk
3532 * the tree and find the left-most node whose id is >= s_idx. This
3533 * provides an efficient O(log n) starting point for the dump
3534 * continuation.
3535 */
3536 if (s_idx != 0) {
3537 struct rb_node *tmp = root->rb_node;
3538
3539 node = NULL;
3540 while (tmp) {
3541 struct nexthop *nh;
3542
3543 nh = rb_entry(tmp, struct nexthop, rb_node);
3544 if (nh->id < s_idx) {
3545 tmp = tmp->rb_right;
3546 } else {
3547 /* Track current candidate and keep looking on
3548 * the left side to find the left-most
3549 * (smallest id) that is still >= s_idx.
3550 */
3551 node = tmp;
3552 tmp = tmp->rb_left;
3553 }
3554 }
3555 } else {
3556 node = rb_first(root);
3557 }
3558
3559 for (; node; node = rb_next(node)) {
3560 struct nexthop *nh;
3561
3562 nh = rb_entry(node, struct nexthop, rb_node);
3563
3564 ctx->idx = nh->id;
3565 err = nh_cb(skb, cb, nh, data);
3566 if (err)
3567 return err;
3568 }
3569
3570 return 0;
3571}
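
/* Worked example (illustrative): with ids {1, 5, 9} in the tree and a
 * previous pass that stopped at id 5, the resume walk above searches for
 * the left-most node with id >= 5 and restarts the dump at node 5,
 * skipping id 1 without a linear scan.
 */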
3572
3573static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
3574 struct nexthop *nh, void *data)
3575{
3576 struct nhmsg *nhm = nlmsg_data(cb->nlh);
3577 struct nh_dump_filter *filter = data;
3578
3579 if (nh_dump_filtered(nh, filter, nhm->nh_family))
3580 return 0;
3581
3582 return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
3583 NETLINK_CB(cb->skb).portid,
3584 cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
3585}
3586
3587/* rtnl */
3588static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
3589{
3590 struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
3591 struct net *net = sock_net(skb->sk);
3592 struct rb_root *root = &net->nexthop.rb_root;
3593 struct nh_dump_filter filter = {};
3594 int err;
3595
3596 err = nh_valid_dump_req(cb->nlh, &filter, cb);
3597 if (err < 0)
3598 return err;
3599
3600 err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
3601 &rtm_dump_nexthop_cb, &filter);
3602
3603 cb->seq = net->nexthop.seq;
3604 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3605 return err;
3606}
3607
3608static struct nexthop *
3609nexthop_find_group_resilient(struct net *net, u32 id,
3610 struct netlink_ext_ack *extack)
3611{
3612 struct nh_group *nhg;
3613 struct nexthop *nh;
3614
3615 nh = nexthop_find_by_id(net, id);
3616 if (!nh)
3617 return ERR_PTR(-ENOENT);
3618
3619 if (!nh->is_group) {
3620 NL_SET_ERR_MSG(extack, "Not a nexthop group");
3621 return ERR_PTR(-EINVAL);
3622 }
3623
3624 nhg = rtnl_dereference(nh->nh_grp);
3625 if (!nhg->resilient) {
3626 NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
3627 return ERR_PTR(-EINVAL);
3628 }
3629
3630 return nh;
3631}
3632
3633static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
3634 struct netlink_ext_ack *extack)
3635{
3636 u32 idx;
3637
3638 if (attr) {
3639 idx = nla_get_u32(attr);
3640 if (!idx) {
3641 NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3642 return -EINVAL;
3643 }
3644 *nh_id_p = idx;
3645 } else {
3646 *nh_id_p = 0;
3647 }
3648
3649 return 0;
3650}
3651
3652static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
3653 struct nh_dump_filter *filter,
3654 struct netlink_callback *cb)
3655{
3656 struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
3657 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
3658 int err;
3659
3660 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3661 ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
3662 rtm_nh_policy_dump_bucket, NULL);
3663 if (err < 0)
3664 return err;
3665
3666 err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
3667 if (err)
3668 return err;
3669
3670 if (tb[NHA_RES_BUCKET]) {
3671 size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;
3672
3673 err = nla_parse_nested(res_tb, max,
3674 tb[NHA_RES_BUCKET],
3675 rtm_nh_res_bucket_policy_dump,
3676 cb->extack);
3677 if (err < 0)
3678 return err;
3679
3680 err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
3681 &filter->res_bucket_nh_id,
3682 cb->extack);
3683 if (err)
3684 return err;
3685 }
3686
3687 return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3688}
3689
3690struct rtm_dump_res_bucket_ctx {
3691 struct rtm_dump_nh_ctx nh;
3692 u16 bucket_index;
3693};
3694
3695static struct rtm_dump_res_bucket_ctx *
3696rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
3697{
3698 struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
3699
3700 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3701 return ctx;
3702}
3703
3704struct rtm_dump_nexthop_bucket_data {
3705 struct rtm_dump_res_bucket_ctx *ctx;
3706 struct nh_dump_filter filter;
3707};
3708
3709static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
3710 struct netlink_callback *cb,
3711 struct nexthop *nh,
3712 struct rtm_dump_nexthop_bucket_data *dd)
3713{
3714 u32 portid = NETLINK_CB(cb->skb).portid;
3715 struct nhmsg *nhm = nlmsg_data(cb->nlh);
3716 struct nh_res_table *res_table;
3717 struct nh_group *nhg;
3718 u16 bucket_index;
3719 int err;
3720
3721 nhg = rtnl_dereference(nh->nh_grp);
3722 res_table = rtnl_dereference(nhg->res_table);
3723 for (bucket_index = dd->ctx->bucket_index;
3724 bucket_index < res_table->num_nh_buckets;
3725 bucket_index++) {
3726 struct nh_res_bucket *bucket;
3727 struct nh_grp_entry *nhge;
3728
3729 bucket = &res_table->nh_buckets[bucket_index];
3730 nhge = rtnl_dereference(bucket->nh_entry);
3731 if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
3732 continue;
3733
3734 if (dd->filter.res_bucket_nh_id &&
3735 dd->filter.res_bucket_nh_id != nhge->nh->id)
3736 continue;
3737
3738 dd->ctx->bucket_index = bucket_index;
3739 err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
3740 RTM_NEWNEXTHOPBUCKET, portid,
3741 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3742 cb->extack);
3743 if (err)
3744 return err;
3745 }
3746
3747 dd->ctx->bucket_index = 0;
3748
3749 return 0;
3750}
3751
3752static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
3753 struct netlink_callback *cb,
3754 struct nexthop *nh, void *data)
3755{
3756 struct rtm_dump_nexthop_bucket_data *dd = data;
3757 struct nh_group *nhg;
3758
3759 if (!nh->is_group)
3760 return 0;
3761
3762 nhg = rtnl_dereference(nh->nh_grp);
3763 if (!nhg->resilient)
3764 return 0;
3765
3766 return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
3767}
3768
3769/* rtnl */
3770static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
3771 struct netlink_callback *cb)
3772{
3773 struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
3774 struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
3775 struct net *net = sock_net(skb->sk);
3776 struct nexthop *nh;
3777 int err;
3778
3779 err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
3780 if (err)
3781 return err;
3782
3783 if (dd.filter.nh_id) {
3784 nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
3785 cb->extack);
3786 if (IS_ERR(nh))
3787 return PTR_ERR(nh);
3788 err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
3789 } else {
3790 struct rb_root *root = &net->nexthop.rb_root;
3791
3792 err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
3793 &rtm_dump_nexthop_bucket_cb, &dd);
3794 }
3795
3796 cb->seq = net->nexthop.seq;
3797 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3798 return err;
3799}
3800
3801static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
3802 u16 *bucket_index,
3803 struct netlink_ext_ack *extack)
3804{
3805 struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
3806 int err;
3807
3808 err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
3809 res, rtm_nh_res_bucket_policy_get, extack);
3810 if (err < 0)
3811 return err;
3812
3813 if (!tb[NHA_RES_BUCKET_INDEX]) {
3814 NL_SET_ERR_MSG(extack, "Bucket index is missing");
3815 return -EINVAL;
3816 }
3817
3818 *bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
3819 return 0;
3820}
3821
3822static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
3823 u32 *id, u16 *bucket_index,
3824 struct netlink_ext_ack *extack)
3825{
3826 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
3827 int err;
3828
3829 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3830 ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
3831 rtm_nh_policy_get_bucket, extack);
3832 if (err < 0)
3833 return err;
3834
3835 err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
3836 if (err)
3837 return err;
3838
3839 if (!tb[NHA_RES_BUCKET]) {
3840 NL_SET_ERR_MSG(extack, "Bucket information is missing");
3841 return -EINVAL;
3842 }
3843
3844 err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
3845 bucket_index, extack);
3846 if (err)
3847 return err;
3848
3849 return 0;
3850}
3851
3852/* rtnl */
3853static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3854 struct netlink_ext_ack *extack)
3855{
3856 struct net *net = sock_net(in_skb->sk);
3857 struct nh_res_table *res_table;
3858 struct sk_buff *skb = NULL;
3859 struct nh_group *nhg;
3860 struct nexthop *nh;
3861 u16 bucket_index;
3862 int err;
3863 u32 id;
3864
3865 err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
3866 if (err)
3867 return err;
3868
3869 nh = nexthop_find_group_resilient(net, id, extack);
3870 if (IS_ERR(nh))
3871 return PTR_ERR(nh);
3872
3873 nhg = rtnl_dereference(nh->nh_grp);
3874 res_table = rtnl_dereference(nhg->res_table);
3875 if (bucket_index >= res_table->num_nh_buckets) {
3876 NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
3877 return -ENOENT;
3878 }
3879
3880 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3881 if (!skb)
3882 return -ENOBUFS;
3883
3884 err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
3885 bucket_index, RTM_NEWNEXTHOPBUCKET,
3886 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
3887 0, extack);
3888 if (err < 0) {
3889 WARN_ON(err == -EMSGSIZE);
3890 goto errout_free;
3891 }
3892
3893 return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3894
3895errout_free:
3896 kfree_skb(skb);
3897 return err;
3898}
3899
3900static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
3901{
3902 unsigned int hash = nh_dev_hashfn(dev->ifindex);
3903 struct net *net = dev_net(dev);
3904 struct hlist_head *head = &net->nexthop.devhash[hash];
3905 struct hlist_node *n;
3906 struct nh_info *nhi;
3907
3908 hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
3909 if (nhi->fib_nhc.nhc_dev == dev) {
3910 if (nhi->family == AF_INET)
3911 fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
3912 orig_mtu);
3913 }
3914 }
3915}
3916
3917/* rtnl */
3918static int nh_netdev_event(struct notifier_block *this,
3919 unsigned long event, void *ptr)
3920{
3921 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3922 struct netdev_notifier_info_ext *info_ext;
3923
3924 switch (event) {
3925 case NETDEV_DOWN:
3926 case NETDEV_UNREGISTER:
3927 nexthop_flush_dev(dev, event);
3928 break;
3929 case NETDEV_CHANGE:
3930 if (!(netif_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
3931 nexthop_flush_dev(dev, event);
3932 break;
3933 case NETDEV_CHANGEMTU:
3934 info_ext = ptr;
3935 nexthop_sync_mtu(dev, info_ext->ext.mtu);
3936 rt_cache_flush(dev_net(dev));
3937 break;
3938 }
3939 return NOTIFY_DONE;
3940}
3941
3942static struct notifier_block nh_netdev_notifier = {
3943 .notifier_call = nh_netdev_event,
3944};
3945
3946static int nexthops_dump(struct net *net, struct notifier_block *nb,
3947 enum nexthop_event_type event_type,
3948 struct netlink_ext_ack *extack)
3949{
3950 struct rb_root *root = &net->nexthop.rb_root;
3951 struct rb_node *node;
3952 int err = 0;
3953
3954 for (node = rb_first(root); node; node = rb_next(node)) {
3955 struct nexthop *nh;
3956
3957 nh = rb_entry(node, struct nexthop, rb_node);
3958 err = call_nexthop_notifier(nb, net, event_type, nh, extack);
3959 if (err)
3960 break;
3961 }
3962
3963 return err;
3964}
3965
3966int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
3967 struct netlink_ext_ack *extack)
3968{
3969 int err;
3970
3971 rtnl_lock();
3972 err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
3973 if (err)
3974 goto unlock;
3975 err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
3976 nb);
3977unlock:
3978 rtnl_unlock();
3979 return err;
3980}
3981EXPORT_SYMBOL(register_nexthop_notifier);
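
/* Minimal usage sketch (illustrative; the foo_* names are hypothetical,
 * not part of this file): a driver registers a notifier block and first
 * receives a replay of all existing nexthops as NEXTHOP_EVENT_REPLACE,
 * courtesy of the nexthops_dump() call above:
 *
 *	static int foo_nexthop_event(struct notifier_block *nb,
 *				     unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *		int err;
 *
 *		switch (event) {
 *		case NEXTHOP_EVENT_REPLACE:
 *			err = foo_program_nexthop(info);
 *			return notifier_from_errno(err);
 *		case NEXTHOP_EVENT_DEL:
 *			foo_unprogram_nexthop(info);
 *			return NOTIFY_DONE;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nexthop_event,
 *	};
 *
 *	err = register_nexthop_notifier(net, &foo_nb, extack);
 */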
3982
3983int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3984{
3985 int err;
3986
3987 err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
3988 nb);
3989 if (!err)
3990 nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
3991 return err;
3992}
3993EXPORT_SYMBOL(__unregister_nexthop_notifier);
3994
3995int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
3996{
3997 int err;
3998
3999 rtnl_lock();
4000 err = __unregister_nexthop_notifier(net, nb);
4001 rtnl_unlock();
4002 return err;
4003}
4004EXPORT_SYMBOL(unregister_nexthop_notifier);
4005
4006void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
4007{
4008 struct nexthop *nexthop;
4009
4010 rcu_read_lock();
4011
4012 nexthop = nexthop_find_by_id(net, id);
4013 if (!nexthop)
4014 goto out;
4015
4016 nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
4017 if (offload)
4018 nexthop->nh_flags |= RTNH_F_OFFLOAD;
4019 if (trap)
4020 nexthop->nh_flags |= RTNH_F_TRAP;
4021
4022out:
4023 rcu_read_unlock();
4024}
4025EXPORT_SYMBOL(nexthop_set_hw_flags);
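
/* Usage sketch (illustrative): a driver reports the result of offloading
 * nexthop 'id' so that RTM_GETNEXTHOP dumps can show the offload/trap
 * state, e.g. after programming the adjacency into the ASIC:
 *
 *	nexthop_set_hw_flags(net, id, true, false);	(offloaded)
 *	nexthop_set_hw_flags(net, id, false, true);	(trapped to CPU)
 */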

void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap)
{
	struct nh_res_table *res_table;
	struct nh_res_bucket *bucket;
	struct nexthop *nexthop;
	struct nh_group *nhg;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	if (bucket_index >= nhg->res_table->num_nh_buckets)
		goto out;

	res_table = rcu_dereference(nhg->res_table);
	bucket = &res_table->nh_buckets[bucket_index];
	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		bucket->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		bucket->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);
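
/* Usage sketch: the per-bucket variant is the same idea for resilient
 * groups, keyed by the group ID plus a bucket index (names hypothetical):
 *
 *	nexthop_bucket_set_hw_flags(net, grp_id, bucket_index, true, false);
 */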

void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity)
{
	struct nh_res_table *res_table;
	struct nexthop *nexthop;
	struct nh_group *nhg;
	u16 i;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Instead of silently ignoring some buckets, demand that the sizes
	 * be the same.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (num_buckets != res_table->num_nh_buckets)
		goto out;

	for (i = 0; i < num_buckets; i++) {
		if (test_bit(i, activity))
			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);
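
/* Usage sketch (hypothetical names): a driver periodically reads its
 * hardware activity indication and reports which buckets forwarded traffic
 * since the last read; buckets reported busy are not treated as idle and
 * so are not eligible for migration to another nexthop. Bit i corresponds
 * to bucket i, and num_buckets must match the group's bucket count exactly
 * or the report is dropped, as enforced above:
 *
 *	DECLARE_BITMAP(activity, MY_NUM_BUCKETS) = {};
 *
 *	// set bit i for each bucket the hardware marked active, then:
 *	nexthop_res_grp_activity_update(net, grp_id, MY_NUM_BUCKETS, activity);
 */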

static void __net_exit nexthop_net_exit_rtnl(struct net *net,
					     struct list_head *dev_to_kill)
{
	ASSERT_RTNL_NET(net);
	flush_all_nexthops(net);
}

static void __net_exit nexthop_net_exit(struct net *net)
{
	kfree(net->nexthop.devhash);
	net->nexthop.devhash = NULL;
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;
	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
	.exit_rtnl = nexthop_net_exit_rtnl,
};

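/* Entries below that omit .protocol are registered under PF_UNSPEC (0);
 * the extra PF_INET/PF_INET6 entries register the same handlers for
 * address-family-specific requests as well.
 */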
static const struct rtnl_msg_handler nexthop_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWNEXTHOP, .doit = rtm_new_nexthop,
	 .flags = RTNL_FLAG_DOIT_PERNET},
	{.msgtype = RTM_DELNEXTHOP, .doit = rtm_del_nexthop,
	 .flags = RTNL_FLAG_DOIT_PERNET},
	{.msgtype = RTM_GETNEXTHOP, .doit = rtm_get_nexthop,
	 .dumpit = rtm_dump_nexthop},
	{.msgtype = RTM_GETNEXTHOPBUCKET, .doit = rtm_get_nexthop_bucket,
	 .dumpit = rtm_dump_nexthop_bucket},
	{.protocol = PF_INET, .msgtype = RTM_NEWNEXTHOP,
	 .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET},
	{.protocol = PF_INET, .msgtype = RTM_GETNEXTHOP,
	 .dumpit = rtm_dump_nexthop},
	{.protocol = PF_INET6, .msgtype = RTM_NEWNEXTHOP,
	 .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET},
	{.protocol = PF_INET6, .msgtype = RTM_GETNEXTHOP,
	 .dumpit = rtm_dump_nexthop},
};

static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register_many(nexthop_rtnl_msg_handlers);

	return 0;
}
subsys_initcall(nexthop_init);