// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 */

#include <linux/module.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>

static struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
					 const struct sk_buff *oldskb,
					 __u8 protocol, int ttl);
static void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
				    const struct tcphdr *oth);
static const struct tcphdr *
nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
			struct tcphdr *_oth, int hook);

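/* Basic sanity checks on the IPv4 header of the packet being rejected:
 * the header must be pullable, version 4 with a sane IHL, and tot_len
 * must be consistent with the actual skb length. Returns 1 if the packet
 * is usable as a reject target, 0 otherwise.
 */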
static int nf_reject_iphdr_validate(struct sk_buff *skb)
{
	struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return 0;

	iph = ip_hdr(skb);
	if (iph->ihl < 5 || iph->version != 4)
		return 0;

	len = ntohs(iph->tot_len);
	if (skb->len < len)
		return 0;
	else if (len < (iph->ihl*4))
		return 0;

	if (!pskb_may_pull(skb, iph->ihl*4))
		return 0;

	return 1;
}

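/* Build a TCP RST reply for the TCP segment in oldskb. Returns the new
 * skb for the caller to transmit, or NULL if the original packet is not
 * a valid reset target (bad header, fragment, RST already set, checksum
 * failure) or if allocation fails.
 */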
struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
					   struct sk_buff *oldskb,
					   const struct net_device *dev,
					   int hook)
{
	const struct tcphdr *oth;
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct tcphdr _oth;

	if (!nf_reject_iphdr_validate(oldskb))
		return NULL;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return NULL;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	nskb->dev = (struct net_device *)dev;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_tcp_reset);

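/* True if skb is an ICMP destination unreachable message; used below to
 * avoid answering an ICMP error with another ICMP error.
 */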
static bool nf_skb_is_icmp_unreach(const struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	u8 *tp, _type;
	int thoff;

	if (iph->protocol != IPPROTO_ICMP)
		return false;

	thoff = skb_network_offset(skb) + sizeof(*iph);

	tp = skb_header_pointer(skb,
				thoff + offsetof(struct icmphdr, type),
				sizeof(_type), &_type);

	if (!tp)
		return false;

	return *tp == ICMP_DEST_UNREACH;
}

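/* Build an ICMP destination unreachable reply carrying the given code,
 * quoting as much of the original packet as allowed. Returns the new skb
 * for the caller to transmit, or NULL if the packet must not be answered
 * (fragment, ICMP unreachable, checksum failure) or allocation fails.
 */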
struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
					 struct sk_buff *oldskb,
					 const struct net_device *dev,
					 int hook, u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	int dataoff;
	__wsum csum;
	u8 proto;

	if (!nf_reject_iphdr_validate(oldskb))
		return NULL;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return NULL;

	/* don't reply to ICMP_DEST_UNREACH with ICMP_DEST_UNREACH. */
	if (nf_skb_is_icmp_unreach(oldskb))
		return NULL;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return NULL;

	if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
		return NULL;

	dataoff = ip_hdrlen(oldskb);
	proto = ip_hdr(oldskb)->protocol;

	if (!skb_csum_unnecessary(oldskb) &&
	    nf_reject_verify_csum(oldskb, dataoff, proto) &&
	    nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
		return NULL;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	nskb->dev = (struct net_device *)dev;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   READ_ONCE(net->ipv4.sysctl_ip_default_ttl));

	skb_reset_transport_header(nskb);
	icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = code;

	skb_put_data(nskb, skb_network_header(oldskb), len);

	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_unreach);

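/* Return the TCP header of oldskb (copied into _oth if it is not linear
 * in the skb), or NULL if no RST must be sent: the packet is a fragment,
 * not TCP, already an RST, or fails the TCP checksum.
 */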
static const struct tcphdr *
nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
			struct tcphdr *_oth, int hook)
{
	const struct tcphdr *oth;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return NULL;

	if (ip_hdr(oldskb)->protocol != IPPROTO_TCP)
		return NULL;

	oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
				 sizeof(struct tcphdr), _oth);
	if (oth == NULL)
		return NULL;

	/* No RST for RST. */
	if (oth->rst)
		return NULL;

	/* Check checksum */
	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
		return NULL;

	return oth;
}

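/* Append a fresh IPv4 header to nskb with source and destination swapped
 * relative to oldskb. tot_len and the header checksum are left to the
 * caller to fill in once the payload is complete.
 */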
static struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
					 const struct sk_buff *oldskb,
					 __u8 protocol, int ttl)
{
	struct iphdr *niph, *oiph = ip_hdr(oldskb);

	skb_reset_network_header(nskb);
	niph = skb_put(nskb, sizeof(struct iphdr));
	niph->version = 4;
	niph->ihl = sizeof(struct iphdr) / 4;
	niph->tos = 0;
	niph->id = 0;
	niph->frag_off = htons(IP_DF);
	niph->protocol = protocol;
	niph->check = 0;
	niph->saddr = oiph->daddr;
	niph->daddr = oiph->saddr;
	niph->ttl = ttl;

	nskb->protocol = htons(ETH_P_IP);

	return niph;
}

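/* Append a minimal TCP RST header to nskb, mirroring the ports of the
 * original segment. If the original carried an ACK, its ack_seq becomes
 * our sequence number; otherwise we ACK everything the original segment
 * consumed. The checksum is left for CHECKSUM_PARTIAL completion.
 */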
static void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
				    const struct tcphdr *oth)
{
	struct iphdr *niph = ip_hdr(nskb);
	struct tcphdr *tcph;

	skb_reset_transport_header(nskb);
	tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
	tcph->source = oth->dest;
	tcph->dest = oth->source;
	tcph->doff = sizeof(struct tcphdr) / 4;

	if (oth->ack) {
		tcph->seq = oth->ack_seq;
	} else {
		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
				      oldskb->len - ip_hdrlen(oldskb) -
				      (oth->doff << 2));
		tcph->ack = 1;
	}

	tcph->rst = 1;
	tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
				    niph->daddr, 0);
	nskb->ip_summed = CHECKSUM_PARTIAL;
	nskb->csum_start = (unsigned char *)tcph - nskb->head;
	nskb->csum_offset = offsetof(struct tcphdr, check);
}

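/* Attach a route towards the sender of skb_in when the packet has no dst
 * yet (e.g. when rejecting before the routing decision). Returns 0 on
 * success, -1 if no route could be found.
 */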
static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
{
	struct dst_entry *dst = NULL;
	struct flowi fl;

	memset(&fl, 0, sizeof(struct flowi));
	fl.u.ip4.daddr = ip_hdr(skb_in)->saddr;
	nf_ip_route(dev_net(skb_in->dev), &dst, &fl, false);
	if (!dst)
		return -1;

	skb_dst_set(skb_in, dst);
	return 0;
}

/* Send RST reply */
void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
		   int hook)
{
	const struct tcphdr *oth;
	struct sk_buff *nskb;
	struct tcphdr _oth;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return;

	if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
		return;

	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	/* ip_route_me_harder expects skb->dst to be set */
	skb_dst_set_noref(nskb, skb_dst(oldskb));

	nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);

	skb_reserve(nskb, LL_MAX_HEADER);
	nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
			    ip4_dst_hoplimit(skb_dst(nskb)));
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC))
		goto free_nskb;

	/* "Never happens" */
	if (nskb->len > dst_mtu(skb_dst(nskb)))
		goto free_nskb;

	nf_ct_attach(nskb, oldskb);
	nf_ct_set_closing(skb_nfct(oldskb));

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* If we use ip_local_out for bridged traffic, the MAC source on
	 * the RST will be ours, instead of the destination's. This confuses
	 * some routers/firewalls, and they drop the packet. So we need to
	 * build the eth header using the original destination's MAC as the
	 * source, and send the RST packet directly.
	 */
	if (nf_bridge_info_exists(oldskb)) {
		struct ethhdr *oeth = eth_hdr(oldskb);
		struct iphdr *niph = ip_hdr(nskb);
		struct net_device *br_indev;

		br_indev = nf_bridge_get_physindev(oldskb, net);
		if (!br_indev)
			goto free_nskb;

		nskb->dev = br_indev;
		niph->tot_len = htons(nskb->len);
		ip_send_check(niph);
		if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
				    oeth->h_source, oeth->h_dest, nskb->len) < 0)
			goto free_nskb;
		dev_queue_xmit(nskb);
	} else
#endif
		ip_local_out(net, nskb->sk, nskb);

	return;

 free_nskb:
	kfree_skb(nskb);
}
EXPORT_SYMBOL_GPL(nf_send_reset);

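/* Send an ICMP destination unreachable error with the given code in reply
 * to skb_in, unless the packet is a fragment or fails checksum validation.
 */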
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
{
	struct iphdr *iph = ip_hdr(skb_in);
	int dataoff = ip_hdrlen(skb_in);
	u8 proto = iph->protocol;

	if (iph->frag_off & htons(IP_OFFSET))
		return;

	if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
		return;

	if (skb_csum_unnecessary(skb_in) ||
	    !nf_reject_verify_csum(skb_in, dataoff, proto)) {
		icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
		return;
	}

	if (nf_ip_checksum(skb_in, hook, dataoff, proto) == 0)
		icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
EXPORT_SYMBOL_GPL(nf_send_unreach);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IPv4 packet rejection core");