// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/ip6_fib.h>

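/* Per-CPU scratch state that carries the original L2 header and skb
 * metadata across the IP fragmentation callbacks, so every fragment can
 * be rebuilt as a full L2 frame before it is handed back to the caller's
 * xmit function. bh_lock serializes users of this storage on a CPU.
 */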
struct sch_frag_data {
	unsigned long dst;
	struct qdisc_skb_cb cb;
	__be16 inner_protocol;
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[VLAN_ETH_HLEN];
	int (*xmit)(struct sk_buff *skb);
	local_lock_t bh_lock;
};

static DEFINE_PER_CPU(struct sch_frag_data, sch_frag_data_storage) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

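/* Per-fragment output callback for ip_do_fragment()/ipv6_fragment():
 * restore the stashed dst, qdisc cb, VLAN tag and MAC header, then pass
 * the rebuilt frame to the saved xmit handler.
 */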
static int sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sch_frag_data *data = this_cpu_ptr(&sch_frag_data_storage);

	lockdep_assert_held(&data->bh_lock);
	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*qdisc_skb_cb(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto,
				       data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	return data->xmit(skb);
}

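/* Save the skb's dst, qdisc cb, VLAN state and L2 header in the per-CPU
 * storage and pull the L2 header off the skb, so the IP fragmentation
 * code only sees the L3 packet. Callers take sch_frag_data_storage.bh_lock
 * around the whole fragmentation run.
 */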
static void sch_frag_prepare_frag(struct sk_buff *skb,
				  int (*xmit)(struct sk_buff *skb))
{
	unsigned int hlen = skb_network_offset(skb);
	struct sch_frag_data *data;

	data = this_cpu_ptr(&sch_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->cb = *qdisc_skb_cb(skb);
	data->xmit = xmit;
	data->inner_protocol = skb->inner_protocol;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static unsigned int
sch_frag_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops sch_frag_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = sch_frag_dst_get_mtu,
};

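/* Fragment an IPv4 or IPv6 skb down to the given MRU. A dummy, noref dst
 * backed by sch_frag_dst_ops is installed so ip_do_fragment() and
 * ipv6_stub->ipv6_fragment() can run without a real route; the original
 * refdst is released once fragmentation is done. The skb is freed on error.
 */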
static int sch_fragment(struct net *net, struct sk_buff *skb,
			u16 mru, int (*xmit)(struct sk_buff *skb))
{
	int ret = -1;

	if (skb_network_offset(skb) > VLAN_ETH_HLEN) {
		net_warn_ratelimited("L2 header too long to fragment\n");
		goto err;
	}

	if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
		struct rtable sch_frag_rt = { 0 };
		unsigned long orig_dst;

		local_lock_nested_bh(&sch_frag_data_storage.bh_lock);
		sch_frag_prepare_frag(skb, xmit);
		dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		sch_frag_rt.dst.dev = skb->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &sch_frag_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
		local_unlock_nested_bh(&sch_frag_data_storage.bh_lock);
		refdst_drop(orig_dst);
	} else if (skb_protocol(skb, true) == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info sch_frag_rt;

		local_lock_nested_bh(&sch_frag_data_storage.bh_lock);
		sch_frag_prepare_frag(skb, xmit);
		memset(&sch_frag_rt, 0, sizeof(sch_frag_rt));
		dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		sch_frag_rt.dst.dev = skb->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &sch_frag_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ret = ipv6_stub->ipv6_fragment(net, skb->sk, skb,
					       sch_frag_xmit);
		local_unlock_nested_bh(&sch_frag_data_storage.bh_lock);
		refdst_drop(orig_dst);
	} else {
		net_warn_ratelimited("Fail frag %s: eth=%x, MRU=%d, MTU=%d\n",
				     netdev_name(skb->dev),
				     ntohs(skb_protocol(skb, true)), mru,
				     skb->dev->mtu);
		goto err;
	}

	return ret;
err:
	kfree_skb(skb);
	return ret;
}

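/* Fragment only when an MRU is set and skb->len exceeds the MRU plus the
 * device's hard header length; otherwise transmit the skb unchanged via
 * the supplied xmit callback.
 */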
int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
	u16 mru = tc_skb_cb(skb)->mru;
	int err;

	if (mru && skb->len > mru + skb->dev->hard_header_len)
		err = sch_fragment(dev_net(skb->dev), skb, mru, xmit);
	else
		err = xmit(skb);

	return err;
}
EXPORT_SYMBOL_GPL(sch_frag_xmit_hook);