// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>
#include <linux/netfilter_netdev.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - A packet socket receives packets with the LL header already pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> data

Outgoing, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

Incoming, dev_has_header(dev) == false
   mac_header -> data
     However drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev_has_header(dev) == false
   mac_header -> data. ll header is invisible to us.
   data       -> data

Summary:
   If dev_has_header(dev) == false we are unable to restore the ll header,
   because it is invisible to us.


On transmit:
------------

dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

dev_has_header(dev) == false (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We should set network_header on output to the correct position;
   the packet classifier depends on it.
 */
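
/*
 * Illustrative sketch, not part of this file: from user space, the
 * SOCK_RAW vs. SOCK_DGRAM distinction described above is just the
 * choice of socket type, e.g. (error handling omitted):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 * A SOCK_RAW read then starts at the link-layer header (when the
 * device exposes one), while SOCK_DGRAM yields the payload with the
 * link-layer header already removed.
 */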
|---|

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
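
/*
 * Worked example (illustrative only): with V3_ALIGNMENT == 8 and a
 * hypothetical tp_sizeof_priv of 13, BLK_PLUS_PRIV(13) evaluates to
 * BLK_HDR_LEN + ALIGN(13, 8) == BLK_HDR_LEN + 16, so the first frame
 * of a block starts 16 bytes after the (aligned) block descriptor.
 */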
|---|

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static enum hrtimer_restart prb_retire_rx_blk_timer_expired(struct hrtimer *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};
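
/*
 * Layout note on the aliasing trick above (an observation, not asserted
 * elsewhere in this file): sockaddr_ll starts with sll_family and
 * sll_protocol, two 16-bit fields that together occupy the same four
 * bytes as the 32-bit origlen member, so either view fits in the cb
 * without growing it.
 */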
|---|

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

#ifdef CONFIG_NETFILTER_EGRESS
static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
{
	struct sk_buff *next, *head = NULL, *tail;
	int rc;

	rcu_read_lock();
	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);

		if (!nf_hook_egress(skb, &rc, skb->dev))
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;

		tail = skb;
	}
	rcu_read_unlock();

	return head;
}
#endif
|---|

static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
{
	if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
		return dev_queue_xmit(skb);

#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_hook_egress_active()) {
		skb = nf_hook_direct_egress(skb);
		if (!skb)
			return NET_XMIT_DROP;
	}
#endif
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}
|---|

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1);
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0);

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
|---|

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	/* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		WRITE_ONCE(h.h1->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		WRITE_ONCE(h.h2->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		WRITE_ONCE(h.h3->tp_status, status);
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	/* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return READ_ONCE(h.h1->tp_status);
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return READ_ONCE(h.h2->tp_status);
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return READ_ONCE(h.h3->tp_status);
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec64_cond(skb_tstamp(skb), ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}
|---|

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

|---|
static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
{
	struct vlan_hdr vhdr, *vh;
	unsigned int header_len;

	if (!dev)
		return 0;

	/* In the SOCK_DGRAM scenario, skb data starts at the network
	 * protocol, which is after the VLAN headers. The outer VLAN
	 * header is at the hard_header_len offset in non-variable
	 * length link layer headers. If it's a VLAN device, the
	 * min_header_len should be used to exclude the VLAN header
	 * size.
	 */
	if (dev->min_header_len == dev->hard_header_len)
		header_len = dev->hard_header_len;
	else if (is_vlan_dev(dev))
		header_len = dev->min_header_len;
	else
		return 0;

	vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
				sizeof(vhdr), &vhdr);
	if (unlikely(!vh))
		return 0;

	return ntohs(vh->h_vlan_TCI);
}

static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
{
	__be16 proto = skb->protocol;

	if (unlikely(eth_type_vlan(proto)))
		proto = __vlan_get_protocol_offset(skb, proto,
						   skb_mac_offset(skb), NULL);

	return proto;
}
|---|

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	hrtimer_cancel(&pkc->retire_blk_timer);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits, div;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* If the link speed is this slow, you don't really
	 * need to worry about perf anyway
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	div = ecmd.base.speed / 1000;
	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	if (div)
		return mbits + 1;
	return mbits;
}
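
/*
 * Worked example (illustrative only): for a 1 Gbps link and a 1 MiB
 * block, div == 1 and mbits == (1048576 * 8) / (1024 * 1024) == 8, so
 * the function returns 8 + 1 == 9 ms, i.e. roughly the ~8 ms the link
 * needs to fill one block (see the comment further down), plus slack.
 */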
|---|

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
		union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->interval_ktime = ms_to_ktime(req_u->req3.tp_retire_blk_tov);
	else
		p1->interval_ktime = ms_to_ktime(prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size));
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
	rwlock_init(&p1->blk_fill_in_prog_lock);

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	hrtimer_setup(&p1->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		      CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	hrtimer_start(&p1->retire_blk_timer, p1->interval_ktime,
		      HRTIMER_MODE_REL_SOFT);
	prb_open_block(p1, pbd);
}
|---|

/*
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
static enum hrtimer_restart prb_retire_rx_blk_timer_expired(struct hrtimer *t)
{
	struct packet_sock *po =
		timer_container_of(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (!frozen) {
		if (BLOCK_NUM_PKTS(pbd)) {
			/* Not an empty block. Need to retire the block. */
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			prb_dispatch_next_block(pkc, po);
		}
	} else {
		/* Case 1. Queue was frozen because user-space was
		 * lagging behind.
		 */
		if (!prb_curr_blk_in_use(pbd)) {
			/* Case 2. Queue was frozen, user-space caught up,
			 * now the link went idle && the timer fired.
			 * We don't have a block to close, so we open this
			 * block and restart the timer.
			 * Opening a block thaws the queue and restarts the
			 * timer; thawing/timer-refresh is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	hrtimer_forward_now(&pkc->retire_blk_timer, pkc->interval_ktime);
	spin_unlock(&po->sk.sk_receive_queue.lock);
	return HRTIMER_RESTART;
}
|---|

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
			    struct tpacket_block_desc *pbd1,
			    struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
|---|

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * prb_open_block is called by tpacket_rcv or the timer callback.
 *
 * Reasons why we do NOT update the hrtimer in prb_open_block:
 * 1) It would add complexity to distinguish the two caller scenarios.
 * 2) hrtimer_cancel and hrtimer_start need to be called to update the
 * TMO of an already enqueued hrtimer, leading to complex shutdown logic.
 *
 * One side effect of NOT updating the hrtimer when called by tpacket_rcv
 * is that a newly opened block triggered by tpacket_rcv may be retired
 * earlier than expected. On the other hand, if the timeout were updated in
 * prb_open_block, the frequent reception of network packets that leads to
 * prb_open_block being called could cause the hrtimer to be removed and
 * enqueued repeatedly.
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
			   struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);

	smp_wmb();
}
|---|

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			     struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
				     struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
				     struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			/* Waiting for skb_copy_bits to finish... */
			write_lock(&pkc->blk_fill_in_prog_lock);
			write_unlock(&pkc->blk_fill_in_prog_lock);
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}
|---|

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
	__releases(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	read_unlock(&pkc->blk_fill_in_prog_lock);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc);

	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) {
		ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}
|---|

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
	__acquires(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	read_lock(&pkc->blk_fill_in_prog_lock);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if the last block, which caused the queue to freeze,
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue;
			 * thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available; user_space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}
|---|

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(const struct packet_sock *po,
			      const struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}
|---|

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.frame_max) + 1;
	idx = READ_ONCE(po->rx_ring.head);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
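
/*
 * Worked example (illustrative only): with 64 frames (frame_max == 63)
 * and pow_off == ROOM_POW_OFF (2), the probe lands 64 >> 2 == 16 frames
 * ahead of head, so ROOM_NORMAL roughly means at least a quarter of the
 * ring is still owned by the kernel.
 */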
|---|
| 1254 |  | 
|---|
| 1255 | static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off) | 
|---|
| 1256 | { | 
|---|
| 1257 | int idx, len; | 
|---|
| 1258 |  | 
|---|
| 1259 | len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks); | 
|---|
| 1260 | idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num); | 
|---|
| 1261 | if (pow_off) | 
|---|
| 1262 | idx += len >> pow_off; | 
|---|
| 1263 | if (idx >= len) | 
|---|
| 1264 | idx -= len; | 
|---|
| 1265 | return prb_lookup_block(po, rb: &po->rx_ring, idx, TP_STATUS_KERNEL); | 
|---|
| 1266 | } | 
|---|
| 1267 |  | 
|---|
| 1268 | static int __packet_rcv_has_room(const struct packet_sock *po, | 
|---|
| 1269 | const struct sk_buff *skb) | 
|---|
| 1270 | { | 
|---|
| 1271 | const struct sock *sk = &po->sk; | 
|---|
| 1272 | int ret = ROOM_NONE; | 
|---|
| 1273 |  | 
|---|
| 1274 | if (po->prot_hook.func != tpacket_rcv) { | 
|---|
| 1275 | int rcvbuf = READ_ONCE(sk->sk_rcvbuf); | 
|---|
| 1276 | int avail = rcvbuf - atomic_read(v: &sk->sk_rmem_alloc) | 
|---|
| 1277 | - (skb ? skb->truesize : 0); | 
|---|
| 1278 |  | 
|---|
| 1279 | if (avail > (rcvbuf >> ROOM_POW_OFF)) | 
|---|
| 1280 | return ROOM_NORMAL; | 
|---|
| 1281 | else if (avail > 0) | 
|---|
| 1282 | return ROOM_LOW; | 
|---|
| 1283 | else | 
|---|
| 1284 | return ROOM_NONE; | 
|---|
| 1285 | } | 
|---|
| 1286 |  | 
|---|
| 1287 | if (po->tp_version == TPACKET_V3) { | 
|---|
| 1288 | if (__tpacket_v3_has_room(po, ROOM_POW_OFF)) | 
|---|
| 1289 | ret = ROOM_NORMAL; | 
|---|
| 1290 | else if (__tpacket_v3_has_room(po, pow_off: 0)) | 
|---|
| 1291 | ret = ROOM_LOW; | 
|---|
| 1292 | } else { | 
|---|
| 1293 | if (__tpacket_has_room(po, ROOM_POW_OFF)) | 
|---|
| 1294 | ret = ROOM_NORMAL; | 
|---|
| 1295 | else if (__tpacket_has_room(po, pow_off: 0)) | 
|---|
| 1296 | ret = ROOM_LOW; | 
|---|
| 1297 | } | 
|---|
| 1298 |  | 
|---|
| 1299 | return ret; | 
|---|
| 1300 | } | 
|---|
| 1301 |  | 
|---|
| 1302 | static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb) | 
|---|
| 1303 | { | 
|---|
| 1304 | bool pressure; | 
|---|
| 1305 | int ret; | 
|---|
| 1306 |  | 
|---|
| 1307 | ret = __packet_rcv_has_room(po, skb); | 
|---|
| 1308 | pressure = ret != ROOM_NORMAL; | 
|---|
| 1309 |  | 
|---|
| 1310 | if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure) | 
|---|
| 1311 | packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure); | 
|---|
| 1312 |  | 
|---|
| 1313 | return ret; | 
|---|
| 1314 | } | 
|---|
| 1315 |  | 
|---|
| 1316 | static void packet_rcv_try_clear_pressure(struct packet_sock *po) | 
|---|
| 1317 | { | 
|---|
| 1318 | if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) && | 
|---|
| 1319 | __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) | 
|---|
| 1320 | packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false); | 
|---|
| 1321 | } | 
|---|
| 1322 |  | 
|---|
| 1323 | static void packet_sock_destruct(struct sock *sk) | 
|---|
| 1324 | { | 
|---|
| 1325 | skb_queue_purge(&sk->sk_error_queue); | 
|---|
| 1326 |  | 
|---|
| 1327 | WARN_ON(atomic_read(&sk->sk_rmem_alloc)); | 
|---|
| 1328 | WARN_ON(refcount_read(&sk->sk_wmem_alloc)); | 
|---|
| 1329 |  | 
|---|
| 1330 | if (!sock_flag(sk, SOCK_DEAD)) { | 
|---|
| 1331 | pr_err("Attempt to release alive packet socket: %p\n", sk); | 
|---|
| 1332 | return; | 
|---|
| 1333 | } | 
|---|
| 1334 | } | 
|---|
| 1335 |  | 
|---|
| 1336 | static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb) | 
|---|
| 1337 | { | 
|---|
| 1338 | u32 *history = po->rollover->history; | 
|---|
| 1339 | u32 victim, rxhash; | 
|---|
| 1340 | int i, count = 0; | 
|---|
| 1341 |  | 
|---|
| 1342 | rxhash = skb_get_hash(skb); | 
|---|
| 1343 | for (i = 0; i < ROLLOVER_HLEN; i++) | 
|---|
| 1344 | if (READ_ONCE(history[i]) == rxhash) | 
|---|
| 1345 | count++; | 
|---|
| 1346 |  | 
|---|
| 1347 | victim = get_random_u32_below(ROLLOVER_HLEN); | 
|---|
| 1348 |  | 
|---|
| 1349 | /* Avoid dirtying the cache line if possible */ | 
|---|
| 1350 | if (READ_ONCE(history[victim]) != rxhash) | 
|---|
| 1351 | WRITE_ONCE(history[victim], rxhash); | 
|---|
| 1352 |  | 
|---|
| 1353 | return count > (ROLLOVER_HLEN >> 1); | 
|---|
| 1354 | } | 
|---|
| 1355 |  | 
|---|
| 1356 | static unsigned int fanout_demux_hash(struct packet_fanout *f, | 
|---|
| 1357 | struct sk_buff *skb, | 
|---|
| 1358 | unsigned int num) | 
|---|
| 1359 | { | 
|---|
| 1360 | return reciprocal_scale(__skb_get_hash_symmetric(skb), num); | 
|---|
| 1361 | } | 
|---|
| 1362 |  | 
|---|
| 1363 | static unsigned int fanout_demux_lb(struct packet_fanout *f, | 
|---|
| 1364 | struct sk_buff *skb, | 
|---|
| 1365 | unsigned int num) | 
|---|
| 1366 | { | 
|---|
| 1367 | unsigned int val = atomic_inc_return(&f->rr_cur); | 
|---|
| 1368 |  | 
|---|
| 1369 | return val % num; | 
|---|
| 1370 | } | 
|---|
| 1371 |  | 
|---|
| 1372 | static unsigned int fanout_demux_cpu(struct packet_fanout *f, | 
|---|
| 1373 | struct sk_buff *skb, | 
|---|
| 1374 | unsigned int num) | 
|---|
| 1375 | { | 
|---|
| 1376 | return smp_processor_id() % num; | 
|---|
| 1377 | } | 
|---|
| 1378 |  | 
|---|
| 1379 | static unsigned int fanout_demux_rnd(struct packet_fanout *f, | 
|---|
| 1380 | struct sk_buff *skb, | 
|---|
| 1381 | unsigned int num) | 
|---|
| 1382 | { | 
|---|
| 1383 | return get_random_u32_below(num); | 
|---|
| 1384 | } | 
|---|
| 1385 |  | 
|---|
| 1386 | static unsigned int fanout_demux_rollover(struct packet_fanout *f, | 
|---|
| 1387 | struct sk_buff *skb, | 
|---|
| 1388 | unsigned int idx, bool try_self, | 
|---|
| 1389 | unsigned int num) | 
|---|
| 1390 | { | 
|---|
| 1391 | struct packet_sock *po, *po_next, *po_skip = NULL; | 
|---|
| 1392 | unsigned int i, j, room = ROOM_NONE; | 
|---|
| 1393 |  | 
|---|
| 1394 | po = pkt_sk(rcu_dereference(f->arr[idx])); | 
|---|
| 1395 |  | 
|---|
| 1396 | if (try_self) { | 
|---|
| 1397 | room = packet_rcv_has_room(po, skb); | 
|---|
| 1398 | if (room == ROOM_NORMAL || | 
|---|
| 1399 | (room == ROOM_LOW && !fanout_flow_is_huge(po, skb))) | 
|---|
| 1400 | return idx; | 
|---|
| 1401 | po_skip = po; | 
|---|
| 1402 | } | 
|---|
| 1403 |  | 
|---|
| 1404 | i = j = min_t(int, po->rollover->sock, num - 1); | 
|---|
| 1405 | do { | 
|---|
| 1406 | po_next = pkt_sk(rcu_dereference(f->arr[i])); | 
|---|
| 1407 | if (po_next != po_skip && | 
|---|
| 1408 | !packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) && | 
|---|
| 1409 | packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) { | 
|---|
| 1410 | if (i != j) | 
|---|
| 1411 | po->rollover->sock = i; | 
|---|
| 1412 | atomic_long_inc(&po->rollover->num); | 
|---|
| 1413 | if (room == ROOM_LOW) | 
|---|
| 1414 | atomic_long_inc(&po->rollover->num_huge); | 
|---|
| 1415 | return i; | 
|---|
| 1416 | } | 
|---|
| 1417 |  | 
|---|
| 1418 | if (++i == num) | 
|---|
| 1419 | i = 0; | 
|---|
| 1420 | } while (i != j); | 
|---|
| 1421 |  | 
|---|
| 1422 | atomic_long_inc(&po->rollover->num_failed); | 
|---|
| 1423 | return idx; | 
|---|
| 1424 | } | 
|---|
| 1425 |  | 
|---|
| 1426 | static unsigned int fanout_demux_qm(struct packet_fanout *f, | 
|---|
| 1427 | struct sk_buff *skb, | 
|---|
| 1428 | unsigned int num) | 
|---|
| 1429 | { | 
|---|
| 1430 | return skb_get_queue_mapping(skb) % num; | 
|---|
| 1431 | } | 
|---|
| 1432 |  | 
|---|
| 1433 | static unsigned int fanout_demux_bpf(struct packet_fanout *f, | 
|---|
| 1434 | struct sk_buff *skb, | 
|---|
| 1435 | unsigned int num) | 
|---|
| 1436 | { | 
|---|
| 1437 | struct bpf_prog *prog; | 
|---|
| 1438 | unsigned int ret = 0; | 
|---|
| 1439 |  | 
|---|
| 1440 | rcu_read_lock(); | 
|---|
| 1441 | prog = rcu_dereference(f->bpf_prog); | 
|---|
| 1442 | if (prog) | 
|---|
| 1443 | ret = bpf_prog_run_clear_cb(prog, skb) % num; | 
|---|
| 1444 | rcu_read_unlock(); | 
|---|
| 1445 |  | 
|---|
| 1446 | return ret; | 
|---|
| 1447 | } | 
|---|
| 1448 |  | 
|---|
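| | /* PACKET_FANOUT_FLAG_* values live in the high byte of the fanout | 
|---|
| | * type_flags word, while f->flags stores them already shifted down | 
|---|
| | * (see fanout_add()), hence the >> 8 below. | 
|---|
| | */ | 
|---|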
| 1449 | static bool fanout_has_flag(struct packet_fanout *f, u16 flag) | 
|---|
| 1450 | { | 
|---|
| 1451 | return f->flags & (flag >> 8); | 
|---|
| 1452 | } | 
|---|
| 1453 |  | 
|---|
| 1454 | static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, | 
|---|
| 1455 | struct packet_type *pt, struct net_device *orig_dev) | 
|---|
| 1456 | { | 
|---|
| 1457 | struct packet_fanout *f = pt->af_packet_priv; | 
|---|
| 1458 | unsigned int num = READ_ONCE(f->num_members); | 
|---|
| 1459 | struct net *net = read_pnet(&f->net); | 
|---|
| 1460 | struct packet_sock *po; | 
|---|
| 1461 | unsigned int idx; | 
|---|
| 1462 |  | 
|---|
| 1463 | if (!net_eq(dev_net(dev), net) || !num) { | 
|---|
| 1464 | kfree_skb(skb); | 
|---|
| 1465 | return 0; | 
|---|
| 1466 | } | 
|---|
| 1467 |  | 
|---|
| 1468 | if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { | 
|---|
| 1469 | skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET); | 
|---|
| 1470 | if (!skb) | 
|---|
| 1471 | return 0; | 
|---|
| 1472 | } | 
|---|
| 1473 | switch (f->type) { | 
|---|
| 1474 | case PACKET_FANOUT_HASH: | 
|---|
| 1475 | default: | 
|---|
| 1476 | idx = fanout_demux_hash(f, skb, num); | 
|---|
| 1477 | break; | 
|---|
| 1478 | case PACKET_FANOUT_LB: | 
|---|
| 1479 | idx = fanout_demux_lb(f, skb, num); | 
|---|
| 1480 | break; | 
|---|
| 1481 | case PACKET_FANOUT_CPU: | 
|---|
| 1482 | idx = fanout_demux_cpu(f, skb, num); | 
|---|
| 1483 | break; | 
|---|
| 1484 | case PACKET_FANOUT_RND: | 
|---|
| 1485 | idx = fanout_demux_rnd(f, skb, num); | 
|---|
| 1486 | break; | 
|---|
| 1487 | case PACKET_FANOUT_QM: | 
|---|
| 1488 | idx = fanout_demux_qm(f, skb, num); | 
|---|
| 1489 | break; | 
|---|
| 1490 | case PACKET_FANOUT_ROLLOVER: | 
|---|
| 1491 | idx = fanout_demux_rollover(f, skb, 0, false, num); | 
|---|
| 1492 | break; | 
|---|
| 1493 | case PACKET_FANOUT_CBPF: | 
|---|
| 1494 | case PACKET_FANOUT_EBPF: | 
|---|
| 1495 | idx = fanout_demux_bpf(f, skb, num); | 
|---|
| 1496 | break; | 
|---|
| 1497 | } | 
|---|
| 1498 |  | 
|---|
| 1499 | if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) | 
|---|
| 1500 | idx = fanout_demux_rollover(f, skb, idx, true, num); | 
|---|
| 1501 |  | 
|---|
| 1502 | po = pkt_sk(rcu_dereference(f->arr[idx])); | 
|---|
| 1503 | return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); | 
|---|
| 1504 | } | 
|---|
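| | /* Illustrative userspace usage (a sketch, not part of this file): | 
|---|
| | * sockets that join the same load-balancing group are demuxed by | 
|---|
| | * packet_rcv_fanout() above: | 
|---|
| | * | 
|---|
| | *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); | 
|---|
| | *	int arg = group_id | (PACKET_FANOUT_LB << 16); | 
|---|
| | *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg)); | 
|---|
| | * | 
|---|
| | * group_id is an arbitrary u16 chosen by the application. | 
|---|
| | */ | 
|---|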
| 1505 |  | 
|---|
| 1506 | DEFINE_MUTEX(fanout_mutex); | 
|---|
| 1507 | EXPORT_SYMBOL_GPL(fanout_mutex); | 
|---|
| 1508 | static LIST_HEAD(fanout_list); | 
|---|
| 1509 | static u16 fanout_next_id; | 
|---|
| 1510 |  | 
|---|
| 1511 | static void __fanout_link(struct sock *sk, struct packet_sock *po) | 
|---|
| 1512 | { | 
|---|
| 1513 | struct packet_fanout *f = po->fanout; | 
|---|
| 1514 |  | 
|---|
| 1515 | spin_lock(&f->lock); | 
|---|
| 1516 | rcu_assign_pointer(f->arr[f->num_members], sk); | 
|---|
| 1517 | smp_wmb(); | 
|---|
| 1518 | f->num_members++; | 
|---|
| 1519 | if (f->num_members == 1) | 
|---|
| 1520 | dev_add_pack(&f->prot_hook); | 
|---|
| 1521 | spin_unlock(&f->lock); | 
|---|
| 1522 | } | 
|---|
| 1523 |  | 
|---|
| 1524 | static void __fanout_unlink(struct sock *sk, struct packet_sock *po) | 
|---|
| 1525 | { | 
|---|
| 1526 | struct packet_fanout *f = po->fanout; | 
|---|
| 1527 | int i; | 
|---|
| 1528 |  | 
|---|
| 1529 | spin_lock(&f->lock); | 
|---|
| 1530 | for (i = 0; i < f->num_members; i++) { | 
|---|
| 1531 | if (rcu_dereference_protected(f->arr[i], | 
|---|
| 1532 | lockdep_is_held(&f->lock)) == sk) | 
|---|
| 1533 | break; | 
|---|
| 1534 | } | 
|---|
| 1535 | BUG_ON(i >= f->num_members); | 
|---|
| 1536 | rcu_assign_pointer(f->arr[i], | 
|---|
| 1537 | rcu_dereference_protected(f->arr[f->num_members - 1], | 
|---|
| 1538 | lockdep_is_held(&f->lock))); | 
|---|
| 1539 | f->num_members--; | 
|---|
| 1540 | if (f->num_members == 0) | 
|---|
| 1541 | __dev_remove_pack(&f->prot_hook); | 
|---|
| 1542 | spin_unlock(&f->lock); | 
|---|
| 1543 | } | 
|---|
| 1544 |  | 
|---|
| 1545 | static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) | 
|---|
| 1546 | { | 
|---|
| 1547 | if (sk->sk_family != PF_PACKET) | 
|---|
| 1548 | return false; | 
|---|
| 1549 |  | 
|---|
| 1550 | return ptype->af_packet_priv == pkt_sk(sk)->fanout; | 
|---|
| 1551 | } | 
|---|
| 1552 |  | 
|---|
| 1553 | static void fanout_init_data(struct packet_fanout *f) | 
|---|
| 1554 | { | 
|---|
| 1555 | switch (f->type) { | 
|---|
| 1556 | case PACKET_FANOUT_LB: | 
|---|
| 1557 | atomic_set(&f->rr_cur, 0); | 
|---|
| 1558 | break; | 
|---|
| 1559 | case PACKET_FANOUT_CBPF: | 
|---|
| 1560 | case PACKET_FANOUT_EBPF: | 
|---|
| 1561 | RCU_INIT_POINTER(f->bpf_prog, NULL); | 
|---|
| 1562 | break; | 
|---|
| 1563 | } | 
|---|
| 1564 | } | 
|---|
| 1565 |  | 
|---|
| 1566 | static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) | 
|---|
| 1567 | { | 
|---|
| 1568 | struct bpf_prog *old; | 
|---|
| 1569 |  | 
|---|
| 1570 | spin_lock(&f->lock); | 
|---|
| 1571 | old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); | 
|---|
| 1572 | rcu_assign_pointer(f->bpf_prog, new); | 
|---|
| 1573 | spin_unlock(&f->lock); | 
|---|
| 1574 |  | 
|---|
| 1575 | if (old) { | 
|---|
| 1576 | synchronize_net(); | 
|---|
| 1577 | bpf_prog_destroy(old); | 
|---|
| 1578 | } | 
|---|
| 1579 | } | 
|---|
| 1580 |  | 
|---|
| 1581 | static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data, | 
|---|
| 1582 | unsigned int len) | 
|---|
| 1583 | { | 
|---|
| 1584 | struct bpf_prog *new; | 
|---|
| 1585 | struct sock_fprog fprog; | 
|---|
| 1586 | int ret; | 
|---|
| 1587 |  | 
|---|
| 1588 | if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) | 
|---|
| 1589 | return -EPERM; | 
|---|
| 1590 |  | 
|---|
| 1591 | ret = copy_bpf_fprog_from_user(&fprog, data, len); | 
|---|
| 1592 | if (ret) | 
|---|
| 1593 | return ret; | 
|---|
| 1594 |  | 
|---|
| 1595 | ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); | 
|---|
| 1596 | if (ret) | 
|---|
| 1597 | return ret; | 
|---|
| 1598 |  | 
|---|
| 1599 | __fanout_set_data_bpf(po->fanout, new); | 
|---|
| 1600 | return 0; | 
|---|
| 1601 | } | 
|---|
| 1602 |  | 
|---|
| 1603 | static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data, | 
|---|
| 1604 | unsigned int len) | 
|---|
| 1605 | { | 
|---|
| 1606 | struct bpf_prog *new; | 
|---|
| 1607 | u32 fd; | 
|---|
| 1608 |  | 
|---|
| 1609 | if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) | 
|---|
| 1610 | return -EPERM; | 
|---|
| 1611 | if (len != sizeof(fd)) | 
|---|
| 1612 | return -EINVAL; | 
|---|
| 1613 | if (copy_from_sockptr(&fd, data, len)) | 
|---|
| 1614 | return -EFAULT; | 
|---|
| 1615 |  | 
|---|
| 1616 | new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); | 
|---|
| 1617 | if (IS_ERR(new)) | 
|---|
| 1618 | return PTR_ERR(new); | 
|---|
| 1619 |  | 
|---|
| 1620 | __fanout_set_data_bpf(po->fanout, new); | 
|---|
| 1621 | return 0; | 
|---|
| 1622 | } | 
|---|
| 1623 |  | 
|---|
| 1624 | static int fanout_set_data(struct packet_sock *po, sockptr_t data, | 
|---|
| 1625 | unsigned int len) | 
|---|
| 1626 | { | 
|---|
| 1627 | switch (po->fanout->type) { | 
|---|
| 1628 | case PACKET_FANOUT_CBPF: | 
|---|
| 1629 | return fanout_set_data_cbpf(po, data, len); | 
|---|
| 1630 | case PACKET_FANOUT_EBPF: | 
|---|
| 1631 | return fanout_set_data_ebpf(po, data, len); | 
|---|
| 1632 | default: | 
|---|
| 1633 | return -EINVAL; | 
|---|
| 1634 | } | 
|---|
| 1635 | } | 
|---|
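| | /* Illustrative userspace usage (a sketch): once a socket is in a | 
|---|
| | * PACKET_FANOUT_EBPF group, the demux program is installed by passing | 
|---|
| | * a BPF program fd (obtained from bpf(BPF_PROG_LOAD, ...)) through | 
|---|
| | * PACKET_FANOUT_DATA: | 
|---|
| | * | 
|---|
| | *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, | 
|---|
| | *		   &prog_fd, sizeof(prog_fd)); | 
|---|
| | */ | 
|---|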
| 1636 |  | 
|---|
| 1637 | static void fanout_release_data(struct packet_fanout *f) | 
|---|
| 1638 | { | 
|---|
| 1639 | switch (f->type) { | 
|---|
| 1640 | case PACKET_FANOUT_CBPF: | 
|---|
| 1641 | case PACKET_FANOUT_EBPF: | 
|---|
| 1642 | __fanout_set_data_bpf(f, NULL); | 
|---|
| 1643 | } | 
|---|
| 1644 | } | 
|---|
| 1645 |  | 
|---|
| 1646 | static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id) | 
|---|
| 1647 | { | 
|---|
| 1648 | struct packet_fanout *f; | 
|---|
| 1649 |  | 
|---|
| 1650 | list_for_each_entry(f, &fanout_list, list) { | 
|---|
| 1651 | if (f->id == candidate_id && | 
|---|
| 1652 | read_pnet(&f->net) == sock_net(sk)) { | 
|---|
| 1653 | return false; | 
|---|
| 1654 | } | 
|---|
| 1655 | } | 
|---|
| 1656 | return true; | 
|---|
| 1657 | } | 
|---|
| 1658 |  | 
|---|
| 1659 | static bool fanout_find_new_id(struct sock *sk, u16 *new_id) | 
|---|
| 1660 | { | 
|---|
| 1661 | u16 id = fanout_next_id; | 
|---|
| 1662 |  | 
|---|
| 1663 | do { | 
|---|
| 1664 | if (__fanout_id_is_free(sk, id)) { | 
|---|
| 1665 | *new_id = id; | 
|---|
| 1666 | fanout_next_id = id + 1; | 
|---|
| 1667 | return true; | 
|---|
| 1668 | } | 
|---|
| 1669 |  | 
|---|
| 1670 | id++; | 
|---|
| 1671 | } while (id != fanout_next_id); | 
|---|
| 1672 |  | 
|---|
| 1673 | return false; | 
|---|
| 1674 | } | 
|---|
| 1675 |  | 
|---|
| 1676 | static int fanout_add(struct sock *sk, struct fanout_args *args) | 
|---|
| 1677 | { | 
|---|
| 1678 | struct packet_rollover *rollover = NULL; | 
|---|
| 1679 | struct packet_sock *po = pkt_sk(sk); | 
|---|
| 1680 | u16 type_flags = args->type_flags; | 
|---|
| 1681 | struct packet_fanout *f, *match; | 
|---|
| 1682 | u8 type = type_flags & 0xff; | 
|---|
| 1683 | u8 flags = type_flags >> 8; | 
|---|
| 1684 | u16 id = args->id; | 
|---|
| 1685 | int err; | 
|---|
| 1686 |  | 
|---|
| 1687 | switch (type) { | 
|---|
| 1688 | case PACKET_FANOUT_ROLLOVER: | 
|---|
| 1689 | if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER) | 
|---|
| 1690 | return -EINVAL; | 
|---|
| 1691 | break; | 
|---|
| 1692 | case PACKET_FANOUT_HASH: | 
|---|
| 1693 | case PACKET_FANOUT_LB: | 
|---|
| 1694 | case PACKET_FANOUT_CPU: | 
|---|
| 1695 | case PACKET_FANOUT_RND: | 
|---|
| 1696 | case PACKET_FANOUT_QM: | 
|---|
| 1697 | case PACKET_FANOUT_CBPF: | 
|---|
| 1698 | case PACKET_FANOUT_EBPF: | 
|---|
| 1699 | break; | 
|---|
| 1700 | default: | 
|---|
| 1701 | return -EINVAL; | 
|---|
| 1702 | } | 
|---|
| 1703 |  | 
|---|
| 1704 | mutex_lock(&fanout_mutex); | 
|---|
| 1705 |  | 
|---|
| 1706 | err = -EALREADY; | 
|---|
| 1707 | if (po->fanout) | 
|---|
| 1708 | goto out; | 
|---|
| 1709 |  | 
|---|
| 1710 | if (type == PACKET_FANOUT_ROLLOVER || | 
|---|
| 1711 | (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { | 
|---|
| 1712 | err = -ENOMEM; | 
|---|
| 1713 | rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); | 
|---|
| 1714 | if (!rollover) | 
|---|
| 1715 | goto out; | 
|---|
| 1716 | atomic_long_set(&rollover->num, 0); | 
|---|
| 1717 | atomic_long_set(&rollover->num_huge, 0); | 
|---|
| 1718 | atomic_long_set(&rollover->num_failed, 0); | 
|---|
| 1719 | } | 
|---|
| 1720 |  | 
|---|
| 1721 | if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) { | 
|---|
| 1722 | if (id != 0) { | 
|---|
| 1723 | err = -EINVAL; | 
|---|
| 1724 | goto out; | 
|---|
| 1725 | } | 
|---|
| 1726 | if (!fanout_find_new_id(sk, &id)) { | 
|---|
| 1727 | err = -ENOMEM; | 
|---|
| 1728 | goto out; | 
|---|
| 1729 | } | 
|---|
| 1730 | /* ephemeral flag for the first socket in the group: drop it */ | 
|---|
| 1731 | flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8); | 
|---|
| 1732 | } | 
|---|
| 1733 |  | 
|---|
| 1734 | match = NULL; | 
|---|
| 1735 | list_for_each_entry(f, &fanout_list, list) { | 
|---|
| 1736 | if (f->id == id && | 
|---|
| 1737 | read_pnet(&f->net) == sock_net(sk)) { | 
|---|
| 1738 | match = f; | 
|---|
| 1739 | break; | 
|---|
| 1740 | } | 
|---|
| 1741 | } | 
|---|
| 1742 | err = -EINVAL; | 
|---|
| 1743 | if (match) { | 
|---|
| 1744 | if (match->flags != flags) | 
|---|
| 1745 | goto out; | 
|---|
| 1746 | if (args->max_num_members && | 
|---|
| 1747 | args->max_num_members != match->max_num_members) | 
|---|
| 1748 | goto out; | 
|---|
| 1749 | } else { | 
|---|
| 1750 | if (args->max_num_members > PACKET_FANOUT_MAX) | 
|---|
| 1751 | goto out; | 
|---|
| 1752 | if (!args->max_num_members) | 
|---|
| 1753 | /* legacy PACKET_FANOUT_MAX */ | 
|---|
| 1754 | args->max_num_members = 256; | 
|---|
| 1755 | err = -ENOMEM; | 
|---|
| 1756 | match = kvzalloc(struct_size(match, arr, args->max_num_members), | 
|---|
| 1757 | GFP_KERNEL); | 
|---|
| 1758 | if (!match) | 
|---|
| 1759 | goto out; | 
|---|
| 1760 | write_pnet(&match->net, sock_net(sk)); | 
|---|
| 1761 | match->id = id; | 
|---|
| 1762 | match->type = type; | 
|---|
| 1763 | match->flags = flags; | 
|---|
| 1764 | INIT_LIST_HEAD(&match->list); | 
|---|
| 1765 | spin_lock_init(&match->lock); | 
|---|
| 1766 | refcount_set(&match->sk_ref, 0); | 
|---|
| 1767 | fanout_init_data(match); | 
|---|
| 1768 | match->prot_hook.type = po->prot_hook.type; | 
|---|
| 1769 | match->prot_hook.dev = po->prot_hook.dev; | 
|---|
| 1770 | match->prot_hook.func = packet_rcv_fanout; | 
|---|
| 1771 | match->prot_hook.af_packet_priv = match; | 
|---|
| 1772 | match->prot_hook.af_packet_net = read_pnet(&match->net); | 
|---|
| 1773 | match->prot_hook.id_match = match_fanout_group; | 
|---|
| 1774 | match->max_num_members = args->max_num_members; | 
|---|
| 1775 | match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING; | 
|---|
| 1776 | list_add(&match->list, &fanout_list); | 
|---|
| 1777 | } | 
|---|
| 1778 | err = -EINVAL; | 
|---|
| 1779 |  | 
|---|
| 1780 | spin_lock(&po->bind_lock); | 
|---|
| 1781 | if (po->num && | 
|---|
| 1782 | match->type == type && | 
|---|
| 1783 | match->prot_hook.type == po->prot_hook.type && | 
|---|
| 1784 | match->prot_hook.dev == po->prot_hook.dev) { | 
|---|
| 1785 | err = -ENOSPC; | 
|---|
| 1786 | if (refcount_read(&match->sk_ref) < match->max_num_members) { | 
|---|
| 1787 | /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */ | 
|---|
| 1788 | WRITE_ONCE(po->fanout, match); | 
|---|
| 1789 |  | 
|---|
| 1790 | po->rollover = rollover; | 
|---|
| 1791 | rollover = NULL; | 
|---|
| 1792 | refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); | 
|---|
| 1793 | if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) { | 
|---|
| 1794 | __dev_remove_pack(&po->prot_hook); | 
|---|
| 1795 | __fanout_link(sk, po); | 
|---|
| 1796 | } | 
|---|
| 1797 | err = 0; | 
|---|
| 1798 | } | 
|---|
| 1799 | } | 
|---|
| 1800 | spin_unlock(&po->bind_lock); | 
|---|
| 1801 |  | 
|---|
| 1802 | if (err && !refcount_read(&match->sk_ref)) { | 
|---|
| 1803 | list_del(&match->list); | 
|---|
| 1804 | kvfree(match); | 
|---|
| 1805 | } | 
|---|
| 1806 |  | 
|---|
| 1807 | out: | 
|---|
| 1808 | kfree(rollover); | 
|---|
| 1809 | mutex_unlock(&fanout_mutex); | 
|---|
| 1810 | return err; | 
|---|
| 1811 | } | 
|---|
| 1812 |  | 
|---|
| 1813 | /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes | 
|---|
| 1814 | * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. | 
|---|
| 1815 | * It is the responsibility of the caller to call fanout_release_data() and | 
|---|
| 1816 | * free the returned packet_fanout (after synchronize_net()) | 
|---|
| 1817 | */ | 
|---|
| 1818 | static struct packet_fanout *fanout_release(struct sock *sk) | 
|---|
| 1819 | { | 
|---|
| 1820 | struct packet_sock *po = pkt_sk(sk); | 
|---|
| 1821 | struct packet_fanout *f; | 
|---|
| 1822 |  | 
|---|
| 1823 | mutex_lock(&fanout_mutex); | 
|---|
| 1824 | f = po->fanout; | 
|---|
| 1825 | if (f) { | 
|---|
| 1826 | po->fanout = NULL; | 
|---|
| 1827 |  | 
|---|
| 1828 | if (refcount_dec_and_test(&f->sk_ref)) | 
|---|
| 1829 | list_del(&f->list); | 
|---|
| 1830 | else | 
|---|
| 1831 | f = NULL; | 
|---|
| 1832 | } | 
|---|
| 1833 | mutex_unlock(&fanout_mutex); | 
|---|
| 1834 |  | 
|---|
| 1835 | return f; | 
|---|
| 1836 | } | 
|---|
| 1837 |  | 
|---|
| 1838 | static bool packet_extra_vlan_len_allowed(const struct net_device *dev, | 
|---|
| 1839 | struct sk_buff *skb) | 
|---|
| 1840 | { | 
|---|
| 1841 | /* Earlier code assumed this would be a VLAN pkt, double-check | 
|---|
| 1842 | * this now that we have the actual packet in hand. We can only | 
|---|
| 1843 | * do this check on Ethernet devices. | 
|---|
| 1844 | */ | 
|---|
| 1845 | if (unlikely(dev->type != ARPHRD_ETHER)) | 
|---|
| 1846 | return false; | 
|---|
| 1847 |  | 
|---|
| 1848 | skb_reset_mac_header(skb); | 
|---|
| 1849 | return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)); | 
|---|
| 1850 | } | 
|---|
| 1851 |  | 
|---|
| 1852 | static const struct proto_ops packet_ops; | 
|---|
| 1853 |  | 
|---|
| 1854 | static const struct proto_ops packet_ops_spkt; | 
|---|
| 1855 |  | 
|---|
| 1856 | static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, | 
|---|
| 1857 | struct packet_type *pt, struct net_device *orig_dev) | 
|---|
| 1858 | { | 
|---|
| 1859 | struct sock *sk; | 
|---|
| 1860 | struct sockaddr_pkt *spkt; | 
|---|
| 1861 |  | 
|---|
| 1862 | /* | 
|---|
| 1863 | *	When we registered the protocol we saved the socket in the data | 
|---|
| 1864 | *	field for just this event. | 
|---|
| 1865 | */ | 
|---|
| 1866 |  | 
|---|
| 1867 | sk = pt->af_packet_priv; | 
|---|
| 1868 |  | 
|---|
| 1869 | /* | 
|---|
| 1870 | *	Yank back the headers [hope the device set this | 
|---|
| 1871 | *	right or kerboom...] | 
|---|
| 1872 | * | 
|---|
| 1873 | *	Incoming packets have ll header pulled, | 
|---|
| 1874 | *	push it back. | 
|---|
| 1875 | * | 
|---|
| 1876 | *	For outgoing ones skb->data == skb_mac_header(skb) | 
|---|
| 1877 | *	so that this procedure is noop. | 
|---|
| 1878 | */ | 
|---|
| 1879 |  | 
|---|
| 1880 | if (skb->pkt_type == PACKET_LOOPBACK) | 
|---|
| 1881 | goto out; | 
|---|
| 1882 |  | 
|---|
| 1883 | if (!net_eq(dev_net(dev), sock_net(sk))) | 
|---|
| 1884 | goto out; | 
|---|
| 1885 |  | 
|---|
| 1886 | skb = skb_share_check(skb, GFP_ATOMIC); | 
|---|
| 1887 | if (skb == NULL) | 
|---|
| 1888 | goto oom; | 
|---|
| 1889 |  | 
|---|
| 1890 | /* drop any routing info */ | 
|---|
| 1891 | skb_dst_drop(skb); | 
|---|
| 1892 |  | 
|---|
| 1893 | /* drop conntrack reference */ | 
|---|
| 1894 | nf_reset_ct(skb); | 
|---|
| 1895 |  | 
|---|
| 1896 | spkt = &PACKET_SKB_CB(skb)->sa.pkt; | 
|---|
| 1897 |  | 
|---|
| 1898 | skb_push(skb, skb->data - skb_mac_header(skb)); | 
|---|
| 1899 |  | 
|---|
| 1900 | /* | 
|---|
| 1901 | *	The SOCK_PACKET socket receives _all_ frames. | 
|---|
| 1902 | */ | 
|---|
| 1903 |  | 
|---|
| 1904 | spkt->spkt_family = dev->type; | 
|---|
| 1905 | strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); | 
|---|
| 1906 | spkt->spkt_protocol = skb->protocol; | 
|---|
| 1907 |  | 
|---|
| 1908 | /* | 
|---|
| 1909 | *	Charge the memory to the socket. This is done specifically | 
|---|
| 1910 | *	to prevent sockets using all the memory up. | 
|---|
| 1911 | */ | 
|---|
| 1912 |  | 
|---|
| 1913 | if (sock_queue_rcv_skb(sk, skb) == 0) | 
|---|
| 1914 | return 0; | 
|---|
| 1915 |  | 
|---|
| 1916 | out: | 
|---|
| 1917 | kfree_skb(skb); | 
|---|
| 1918 | oom: | 
|---|
| 1919 | return 0; | 
|---|
| 1920 | } | 
|---|
| 1921 |  | 
|---|
| 1922 | static void packet_parse_headers(struct sk_buff *skb, struct socket *sock) | 
|---|
| 1923 | { | 
|---|
| 1924 | int depth; | 
|---|
| 1925 |  | 
|---|
| 1926 | if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) && | 
|---|
| 1927 | sock->type == SOCK_RAW) { | 
|---|
| 1928 | skb_reset_mac_header(skb); | 
|---|
| 1929 | skb->protocol = dev_parse_header_protocol(skb); | 
|---|
| 1930 | } | 
|---|
| 1931 |  | 
|---|
| 1932 | /* Move network header to the right position for VLAN tagged packets */ | 
|---|
| 1933 | if (likely(skb->dev->type == ARPHRD_ETHER) && | 
|---|
| 1934 | eth_type_vlan(skb->protocol) && | 
|---|
| 1935 | vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0) | 
|---|
| 1936 | skb_set_network_header(skb, depth); | 
|---|
| 1937 |  | 
|---|
| 1938 | skb_probe_transport_header(skb); | 
|---|
| 1939 | } | 
|---|
| 1940 |  | 
|---|
| 1941 | /* | 
|---|
| 1942 | *	Output a raw packet to a device layer. This bypasses all the other | 
|---|
| 1943 | *	protocol layers and you must therefore supply it with a complete frame | 
|---|
| 1944 | */ | 
|---|
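| | /* Illustrative userspace usage (a sketch): a legacy SOCK_PACKET sender | 
|---|
| | * must name the device and hand over a fully built frame: | 
|---|
| | * | 
|---|
| | *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET }; | 
|---|
| | *	strncpy(spkt.spkt_device, "eth0", sizeof(spkt.spkt_device)); | 
|---|
| | *	sendto(fd, frame, frame_len, 0, | 
|---|
| | *	       (struct sockaddr *)&spkt, sizeof(spkt)); | 
|---|
| | */ | 
|---|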
| 1945 |  | 
|---|
| 1946 | static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, | 
|---|
| 1947 | size_t len) | 
|---|
| 1948 | { | 
|---|
| 1949 | struct sock *sk = sock->sk; | 
|---|
| 1950 | DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); | 
|---|
| 1951 | struct sk_buff *skb = NULL; | 
|---|
| 1952 | struct net_device *dev; | 
|---|
| 1953 | struct sockcm_cookie sockc; | 
|---|
| 1954 | __be16 proto = 0; | 
|---|
| 1955 | int err; | 
|---|
| 1956 | int extra_len = 0; | 
|---|
| 1957 |  | 
|---|
| 1958 | /* | 
|---|
| 1959 | *	Get and verify the address. | 
|---|
| 1960 | */ | 
|---|
| 1961 |  | 
|---|
| 1962 | if (saddr) { | 
|---|
| 1963 | if (msg->msg_namelen < sizeof(struct sockaddr)) | 
|---|
| 1964 | return -EINVAL; | 
|---|
| 1965 | if (msg->msg_namelen == sizeof(struct sockaddr_pkt)) | 
|---|
| 1966 | proto = saddr->spkt_protocol; | 
|---|
| 1967 | } else | 
|---|
| 1968 | return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */ | 
|---|
| 1969 |  | 
|---|
| 1970 | /* | 
|---|
| 1971 | *	Find the device first to size check it | 
|---|
| 1972 | */ | 
|---|
| 1973 |  | 
|---|
| 1974 | saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0; | 
|---|
| 1975 | retry: | 
|---|
| 1976 | rcu_read_lock(); | 
|---|
| 1977 | dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); | 
|---|
| 1978 | err = -ENODEV; | 
|---|
| 1979 | if (dev == NULL) | 
|---|
| 1980 | goto out_unlock; | 
|---|
| 1981 |  | 
|---|
| 1982 | err = -ENETDOWN; | 
|---|
| 1983 | if (!(dev->flags & IFF_UP)) | 
|---|
| 1984 | goto out_unlock; | 
|---|
| 1985 |  | 
|---|
| 1986 | /* | 
|---|
| 1987 | * You may not queue a frame bigger than the mtu. This is the lowest level | 
|---|
| 1988 | * raw protocol and you must do your own fragmentation at this level. | 
|---|
| 1989 | */ | 
|---|
| 1990 |  | 
|---|
| 1991 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { | 
|---|
| 1992 | if (!netif_supports_nofcs(dev)) { | 
|---|
| 1993 | err = -EPROTONOSUPPORT; | 
|---|
| 1994 | goto out_unlock; | 
|---|
| 1995 | } | 
|---|
| 1996 | extra_len = 4; /* We're doing our own CRC */ | 
|---|
| 1997 | } | 
|---|
| 1998 |  | 
|---|
| 1999 | err = -EMSGSIZE; | 
|---|
| 2000 | if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len) | 
|---|
| 2001 | goto out_unlock; | 
|---|
| 2002 |  | 
|---|
| 2003 | if (!skb) { | 
|---|
| 2004 | size_t reserved = LL_RESERVED_SPACE(dev); | 
|---|
| 2005 | int tlen = dev->needed_tailroom; | 
|---|
| 2006 | unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0; | 
|---|
| 2007 |  | 
|---|
| 2008 | rcu_read_unlock(); | 
|---|
| 2009 | skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); | 
|---|
| 2010 | if (skb == NULL) | 
|---|
| 2011 | return -ENOBUFS; | 
|---|
| 2012 | /* FIXME: Save some space for broken drivers that write a hard | 
|---|
| 2013 | * header at transmission time by themselves. PPP is the notable | 
|---|
| 2014 | * one here. This should really be fixed at the driver level. | 
|---|
| 2015 | */ | 
|---|
| 2016 | skb_reserve(skb, reserved); | 
|---|
| 2017 | skb_reset_network_header(skb); | 
|---|
| 2018 |  | 
|---|
| 2019 | /* Try to align data part correctly */ | 
|---|
| 2020 | if (hhlen) { | 
|---|
| 2021 | skb->data -= hhlen; | 
|---|
| 2022 | skb->tail -= hhlen; | 
|---|
| 2023 | if (len < hhlen) | 
|---|
| 2024 | skb_reset_network_header(skb); | 
|---|
| 2025 | } | 
|---|
| 2026 | err = memcpy_from_msg(skb_put(skb, len), msg, len); | 
|---|
| 2027 | if (err) | 
|---|
| 2028 | goto out_free; | 
|---|
| 2029 | goto retry; | 
|---|
| 2030 | } | 
|---|
| 2031 |  | 
|---|
| 2032 | if (!dev_validate_header(dev, skb->data, len) || !skb->len) { | 
|---|
| 2033 | err = -EINVAL; | 
|---|
| 2034 | goto out_unlock; | 
|---|
| 2035 | } | 
|---|
| 2036 | if (len > (dev->mtu + dev->hard_header_len + extra_len) && | 
|---|
| 2037 | !packet_extra_vlan_len_allowed(dev, skb)) { | 
|---|
| 2038 | err = -EMSGSIZE; | 
|---|
| 2039 | goto out_unlock; | 
|---|
| 2040 | } | 
|---|
| 2041 |  | 
|---|
| 2042 | sockcm_init(&sockc, sk); | 
|---|
| 2043 | if (msg->msg_controllen) { | 
|---|
| 2044 | err = sock_cmsg_send(sk, msg, &sockc); | 
|---|
| 2045 | if (unlikely(err)) | 
|---|
| 2046 | goto out_unlock; | 
|---|
| 2047 | } | 
|---|
| 2048 |  | 
|---|
| 2049 | skb->protocol = proto; | 
|---|
| 2050 | skb->dev = dev; | 
|---|
| 2051 | skb->priority = sockc.priority; | 
|---|
| 2052 | skb->mark = sockc.mark; | 
|---|
| 2053 | skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid); | 
|---|
| 2054 | skb_setup_tx_timestamp(skb, &sockc); | 
|---|
| 2055 |  | 
|---|
| 2056 | if (unlikely(extra_len == 4)) | 
|---|
| 2057 | skb->no_fcs = 1; | 
|---|
| 2058 |  | 
|---|
| 2059 | packet_parse_headers(skb, sock); | 
|---|
| 2060 |  | 
|---|
| 2061 | dev_queue_xmit(skb); | 
|---|
| 2062 | rcu_read_unlock(); | 
|---|
| 2063 | return len; | 
|---|
| 2064 |  | 
|---|
| 2065 | out_unlock: | 
|---|
| 2066 | rcu_read_unlock(); | 
|---|
| 2067 | out_free: | 
|---|
| 2068 | kfree_skb(skb); | 
|---|
| 2069 | return err; | 
|---|
| 2070 | } | 
|---|
| 2071 |  | 
|---|
| 2072 | static unsigned int run_filter(struct sk_buff *skb, | 
|---|
| 2073 | const struct sock *sk, | 
|---|
| 2074 | unsigned int res) | 
|---|
| 2075 | { | 
|---|
| 2076 | struct sk_filter *filter; | 
|---|
| 2077 |  | 
|---|
| 2078 | rcu_read_lock(); | 
|---|
| 2079 | filter = rcu_dereference(sk->sk_filter); | 
|---|
| 2080 | if (filter != NULL) | 
|---|
| 2081 | res = bpf_prog_run_clear_cb(filter->prog, skb); | 
|---|
| 2082 | rcu_read_unlock(); | 
|---|
| 2083 |  | 
|---|
| 2084 | return res; | 
|---|
| 2085 | } | 
|---|
| 2086 |  | 
|---|
| 2087 | static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, | 
|---|
| 2088 | size_t *len, int vnet_hdr_sz) | 
|---|
| 2089 | { | 
|---|
| 2090 | struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 }; | 
|---|
| 2091 |  | 
|---|
| 2092 | if (*len < vnet_hdr_sz) | 
|---|
| 2093 | return -EINVAL; | 
|---|
| 2094 | *len -= vnet_hdr_sz; | 
|---|
| 2095 |  | 
|---|
| 2096 | if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0)) | 
|---|
| 2097 | return -EINVAL; | 
|---|
| 2098 |  | 
|---|
| 2099 | return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz); | 
|---|
| 2100 | } | 
|---|
| 2101 |  | 
|---|
| 2102 | /* | 
|---|
| 2103 | * This function makes lazy skb cloning in hope that most of packets | 
|---|
| 2104 | * are discarded by BPF. | 
|---|
| 2105 | * | 
|---|
| 2106 | * Note tricky part: we DO mangle shared skb! skb->data, skb->len | 
|---|
| 2107 | * and skb->cb are mangled. It works because (and until) packets | 
|---|
| 2108 | * falling here are owned by current CPU. Output packets are cloned | 
|---|
| 2109 | * by dev_queue_xmit_nit(), input packets are processed by net_bh | 
|---|
| 2110 | * sequentially, so that if we return skb to original state on exit, | 
|---|
| 2111 | * we will not harm anyone. | 
|---|
| 2112 | */ | 
|---|
| 2113 |  | 
|---|
| 2114 | static int packet_rcv(struct sk_buff *skb, struct net_device *dev, | 
|---|
| 2115 | struct packet_type *pt, struct net_device *orig_dev) | 
|---|
| 2116 | { | 
|---|
| 2117 | enum skb_drop_reason drop_reason = SKB_CONSUMED; | 
|---|
| 2118 | struct sock *sk = NULL; | 
|---|
| 2119 | struct sockaddr_ll *sll; | 
|---|
| 2120 | struct packet_sock *po; | 
|---|
| 2121 | u8 *skb_head = skb->data; | 
|---|
| 2122 | int skb_len = skb->len; | 
|---|
| 2123 | unsigned int snaplen, res; | 
|---|
| 2124 |  | 
|---|
| 2125 | if (skb->pkt_type == PACKET_LOOPBACK) | 
|---|
| 2126 | goto drop; | 
|---|
| 2127 |  | 
|---|
| 2128 | sk = pt->af_packet_priv; | 
|---|
| 2129 | po = pkt_sk(sk); | 
|---|
| 2130 |  | 
|---|
| 2131 | if (!net_eq(dev_net(dev), sock_net(sk))) | 
|---|
| 2132 | goto drop; | 
|---|
| 2133 |  | 
|---|
| 2134 | skb->dev = dev; | 
|---|
| 2135 |  | 
|---|
| 2136 | if (dev_has_header(dev)) { | 
|---|
| 2137 | /* The device has an explicit notion of ll header, | 
|---|
| 2138 | * exported to higher levels. | 
|---|
| 2139 | * | 
|---|
| 2140 | * Otherwise, the device hides details of its frame | 
|---|
| 2141 | * structure, so that corresponding packet head is | 
|---|
| 2142 | * never delivered to user. | 
|---|
| 2143 | */ | 
|---|
| 2144 | if (sk->sk_type != SOCK_DGRAM) | 
|---|
| 2145 | skb_push(skb, skb->data - skb_mac_header(skb)); | 
|---|
| 2146 | else if (skb->pkt_type == PACKET_OUTGOING) { | 
|---|
| 2147 | /* Special case: outgoing packets have ll header at head */ | 
|---|
| 2148 | skb_pull(skb, skb_network_offset(skb)); | 
|---|
| 2149 | } | 
|---|
| 2150 | } | 
|---|
| 2151 |  | 
|---|
| 2152 | snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb); | 
|---|
| 2153 |  | 
|---|
| 2154 | res = run_filter(skb, sk, snaplen); | 
|---|
| 2155 | if (!res) | 
|---|
| 2156 | goto drop_n_restore; | 
|---|
| 2157 | if (snaplen > res) | 
|---|
| 2158 | snaplen = res; | 
|---|
| 2159 |  | 
|---|
| 2160 | if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) | 
|---|
| 2161 | goto drop_n_acct; | 
|---|
| 2162 |  | 
|---|
| 2163 | if (skb_shared(skb)) { | 
|---|
| 2164 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | 
|---|
| 2165 | if (nskb == NULL) | 
|---|
| 2166 | goto drop_n_acct; | 
|---|
| 2167 |  | 
|---|
| 2168 | if (skb_head != skb->data) { | 
|---|
| 2169 | skb->data = skb_head; | 
|---|
| 2170 | skb->len = skb_len; | 
|---|
| 2171 | } | 
|---|
| 2172 | consume_skb(skb); | 
|---|
| 2173 | skb = nskb; | 
|---|
| 2174 | } | 
|---|
| 2175 |  | 
|---|
| 2176 | sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8); | 
|---|
| 2177 |  | 
|---|
| 2178 | sll = &PACKET_SKB_CB(skb)->sa.ll; | 
|---|
| 2179 | sll->sll_hatype = dev->type; | 
|---|
| 2180 | sll->sll_pkttype = skb->pkt_type; | 
|---|
| 2181 | if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) | 
|---|
| 2182 | sll->sll_ifindex = orig_dev->ifindex; | 
|---|
| 2183 | else | 
|---|
| 2184 | sll->sll_ifindex = dev->ifindex; | 
|---|
| 2185 |  | 
|---|
| 2186 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); | 
|---|
| 2187 |  | 
|---|
| 2188 | /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg(). | 
|---|
| 2189 | * Use their space for storing the original skb length. | 
|---|
| 2190 | */ | 
|---|
| 2191 | PACKET_SKB_CB(skb)->sa.origlen = skb->len; | 
|---|
| 2192 |  | 
|---|
| 2193 | if (pskb_trim(skb, snaplen)) | 
|---|
| 2194 | goto drop_n_acct; | 
|---|
| 2195 |  | 
|---|
| 2196 | skb_set_owner_r(skb, sk); | 
|---|
| 2197 | skb->dev = NULL; | 
|---|
| 2198 | skb_dst_drop(skb); | 
|---|
| 2199 |  | 
|---|
| 2200 | /* drop conntrack reference */ | 
|---|
| 2201 | nf_reset_ct(skb); | 
|---|
| 2202 |  | 
|---|
| 2203 | spin_lock(&sk->sk_receive_queue.lock); | 
|---|
| 2204 | po->stats.stats1.tp_packets++; | 
|---|
| 2205 | sock_skb_set_dropcount(sk, skb); | 
|---|
| 2206 | skb_clear_delivery_time(skb); | 
|---|
| 2207 | __skb_queue_tail(&sk->sk_receive_queue, skb); | 
|---|
| 2208 | spin_unlock(&sk->sk_receive_queue.lock); | 
|---|
| 2209 | sk->sk_data_ready(sk); | 
|---|
| 2210 | return 0; | 
|---|
| 2211 |  | 
|---|
| 2212 | drop_n_acct: | 
|---|
| 2213 | atomic_inc(&po->tp_drops); | 
|---|
| 2214 | sk_drops_inc(sk); | 
|---|
| 2215 | drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR; | 
|---|
| 2216 |  | 
|---|
| 2217 | drop_n_restore: | 
|---|
| 2218 | if (skb_head != skb->data && skb_shared(skb)) { | 
|---|
| 2219 | skb->data = skb_head; | 
|---|
| 2220 | skb->len = skb_len; | 
|---|
| 2221 | } | 
|---|
| 2222 | drop: | 
|---|
| 2223 | sk_skb_reason_drop(sk, skb, drop_reason); | 
|---|
| 2224 | return 0; | 
|---|
| 2225 | } | 
|---|
| 2226 |  | 
|---|
| 2227 | static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | 
|---|
| 2228 | struct packet_type *pt, struct net_device *orig_dev) | 
|---|
| 2229 | { | 
|---|
| 2230 | enum skb_drop_reason drop_reason = SKB_CONSUMED; | 
|---|
| 2231 | struct sock *sk = NULL; | 
|---|
| 2232 | struct packet_sock *po; | 
|---|
| 2233 | struct sockaddr_ll *sll; | 
|---|
| 2234 | union tpacket_uhdr h; | 
|---|
| 2235 | u8 *skb_head = skb->data; | 
|---|
| 2236 | int skb_len = skb->len; | 
|---|
| 2237 | unsigned int snaplen, res; | 
|---|
| 2238 | unsigned long status = TP_STATUS_USER; | 
|---|
| 2239 | unsigned short macoff, hdrlen; | 
|---|
| 2240 | unsigned int netoff; | 
|---|
| 2241 | struct sk_buff *copy_skb = NULL; | 
|---|
| 2242 | struct timespec64 ts; | 
|---|
| 2243 | __u32 ts_status; | 
|---|
| 2244 | unsigned int slot_id = 0; | 
|---|
| 2245 | int vnet_hdr_sz = 0; | 
|---|
| 2246 |  | 
|---|
| 2247 | /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. | 
|---|
| 2248 | * We may add members to them until current aligned size without forcing | 
|---|
| 2249 | * userspace to call getsockopt(..., PACKET_HDRLEN, ...). | 
|---|
| 2250 | */ | 
|---|
| 2251 | BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32); | 
|---|
| 2252 | BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48); | 
|---|
| 2253 |  | 
|---|
| 2254 | if (skb->pkt_type == PACKET_LOOPBACK) | 
|---|
| 2255 | goto drop; | 
|---|
| 2256 |  | 
|---|
| 2257 | sk = pt->af_packet_priv; | 
|---|
| 2258 | po = pkt_sk(sk); | 
|---|
| 2259 |  | 
|---|
| 2260 | if (!net_eq(dev_net(dev), sock_net(sk))) | 
|---|
| 2261 | goto drop; | 
|---|
| 2262 |  | 
|---|
| 2263 | if (dev_has_header(dev)) { | 
|---|
| 2264 | if (sk->sk_type != SOCK_DGRAM) | 
|---|
| 2265 | skb_push(skb, skb->data - skb_mac_header(skb)); | 
|---|
| 2266 | else if (skb->pkt_type == PACKET_OUTGOING) { | 
|---|
| 2267 | /* Special case: outgoing packets have ll header at head */ | 
|---|
| 2268 | skb_pull(skb, skb_network_offset(skb)); | 
|---|
| 2269 | } | 
|---|
| 2270 | } | 
|---|
| 2271 |  | 
|---|
| 2272 | snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb); | 
|---|
| 2273 |  | 
|---|
| 2274 | res = run_filter(skb, sk, snaplen); | 
|---|
| 2275 | if (!res) | 
|---|
| 2276 | goto drop_n_restore; | 
|---|
| 2277 |  | 
|---|
| 2278 | /* If we are flooded, just give up */ | 
|---|
| 2279 | if (__packet_rcv_has_room(po, skb) == ROOM_NONE) { | 
|---|
| 2280 | atomic_inc(&po->tp_drops); | 
|---|
| 2281 | goto drop_n_restore; | 
|---|
| 2282 | } | 
|---|
| 2283 |  | 
|---|
| 2284 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 
|---|
| 2285 | status |= TP_STATUS_CSUMNOTREADY; | 
|---|
| 2286 | else if (skb->pkt_type != PACKET_OUTGOING && | 
|---|
| 2287 | skb_csum_unnecessary(skb)) | 
|---|
| 2288 | status |= TP_STATUS_CSUM_VALID; | 
|---|
| 2289 | if (skb_is_gso(skb) && skb_is_gso_tcp(skb)) | 
|---|
| 2290 | status |= TP_STATUS_GSO_TCP; | 
|---|
| 2291 |  | 
|---|
| 2292 | if (snaplen > res) | 
|---|
| 2293 | snaplen = res; | 
|---|
| 2294 |  | 
|---|
| 2295 | if (sk->sk_type == SOCK_DGRAM) { | 
|---|
| 2296 | macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + | 
|---|
| 2297 | po->tp_reserve; | 
|---|
| 2298 | } else { | 
|---|
| 2299 | unsigned int maclen = skb_network_offset(skb); | 
|---|
| 2300 | netoff = TPACKET_ALIGN(po->tp_hdrlen + | 
|---|
| 2301 | (maclen < 16 ? 16 : maclen)) + | 
|---|
| 2302 | po->tp_reserve; | 
|---|
| 2303 | vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); | 
|---|
| 2304 | if (vnet_hdr_sz) | 
|---|
| 2305 | netoff += vnet_hdr_sz; | 
|---|
| 2306 | macoff = netoff - maclen; | 
|---|
| 2307 | } | 
|---|
| 2308 | if (netoff > USHRT_MAX) { | 
|---|
| 2309 | atomic_inc(&po->tp_drops); | 
|---|
| 2310 | goto drop_n_restore; | 
|---|
| 2311 | } | 
|---|
| 2312 | if (po->tp_version <= TPACKET_V2) { | 
|---|
| 2313 | if (macoff + snaplen > po->rx_ring.frame_size) { | 
|---|
| 2314 | if (READ_ONCE(po->copy_thresh) && | 
|---|
| 2315 | atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { | 
|---|
| 2316 | if (skb_shared(skb)) { | 
|---|
| 2317 | copy_skb = skb_clone(skb, GFP_ATOMIC); | 
|---|
| 2318 | } else { | 
|---|
| 2319 | copy_skb = skb_get(skb); | 
|---|
| 2320 | skb_head = skb->data; | 
|---|
| 2321 | } | 
|---|
| 2322 | if (copy_skb) { | 
|---|
| 2323 | memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0, | 
|---|
| 2324 | sizeof(PACKET_SKB_CB(copy_skb)->sa.ll)); | 
|---|
| 2325 | skb_set_owner_r(copy_skb, sk); | 
|---|
| 2326 | } | 
|---|
| 2327 | } | 
|---|
| 2328 | snaplen = po->rx_ring.frame_size - macoff; | 
|---|
| 2329 | if ((int)snaplen < 0) { | 
|---|
| 2330 | snaplen = 0; | 
|---|
| 2331 | vnet_hdr_sz = 0; | 
|---|
| 2332 | } | 
|---|
| 2333 | } | 
|---|
| 2334 | } else if (unlikely(macoff + snaplen > | 
|---|
| 2335 | GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { | 
|---|
| 2336 | u32 nval; | 
|---|
| 2337 |  | 
|---|
| 2338 | nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; | 
|---|
| 2339 | pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", | 
|---|
| 2340 | snaplen, nval, macoff); | 
|---|
| 2341 | snaplen = nval; | 
|---|
| 2342 | if (unlikely((int)snaplen < 0)) { | 
|---|
| 2343 | snaplen = 0; | 
|---|
| 2344 | macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; | 
|---|
| 2345 | vnet_hdr_sz = 0; | 
|---|
| 2346 | } | 
|---|
| 2347 | } | 
|---|
| 2348 | spin_lock(&sk->sk_receive_queue.lock); | 
|---|
| 2349 | h.raw = packet_current_rx_frame(po, skb, | 
|---|
| 2350 | TP_STATUS_KERNEL, (macoff+snaplen)); | 
|---|
| 2351 | if (!h.raw) | 
|---|
| 2352 | goto drop_n_account; | 
|---|
| 2353 |  | 
|---|
| 2354 | if (po->tp_version <= TPACKET_V2) { | 
|---|
| 2355 | slot_id = po->rx_ring.head; | 
|---|
| 2356 | if (test_bit(slot_id, po->rx_ring.rx_owner_map)) | 
|---|
| 2357 | goto drop_n_account; | 
|---|
| 2358 | __set_bit(slot_id, po->rx_ring.rx_owner_map); | 
|---|
| 2359 | } | 
|---|
| 2360 |  | 
|---|
| 2361 | if (vnet_hdr_sz && | 
|---|
| 2362 | virtio_net_hdr_from_skb(skb, h.raw + macoff - | 
|---|
| 2363 | sizeof(struct virtio_net_hdr), | 
|---|
| 2364 | vio_le(), true, 0)) { | 
|---|
| 2365 | if (po->tp_version == TPACKET_V3) | 
|---|
| 2366 | prb_clear_blk_fill_status(&po->rx_ring); | 
|---|
| 2367 | goto drop_n_account; | 
|---|
| 2368 | } | 
|---|
| 2369 |  | 
|---|
| 2370 | if (po->tp_version <= TPACKET_V2) { | 
|---|
| 2371 | packet_increment_rx_head(po, &po->rx_ring); | 
|---|
| 2372 | /* | 
|---|
| 2373 | * LOSING will be reported till you read the stats, | 
|---|
| 2374 | * because it's COR - Clear On Read. | 
|---|
| 2375 | * Anyways, moving it for V1/V2 only as V3 doesn't need this | 
|---|
| 2376 | * at packet level. | 
|---|
| 2377 | */ | 
|---|
| 2378 | if (atomic_read(&po->tp_drops)) | 
|---|
| 2379 | status |= TP_STATUS_LOSING; | 
|---|
| 2380 | } | 
|---|
| 2381 |  | 
|---|
| 2382 | po->stats.stats1.tp_packets++; | 
|---|
| 2383 | if (copy_skb) { | 
|---|
| 2384 | status |= TP_STATUS_COPY; | 
|---|
| 2385 | skb_clear_delivery_time(copy_skb); | 
|---|
| 2386 | __skb_queue_tail(&sk->sk_receive_queue, copy_skb); | 
|---|
| 2387 | } | 
|---|
| 2388 | spin_unlock(&sk->sk_receive_queue.lock); | 
|---|
| 2389 |  | 
|---|
| 2390 | skb_copy_bits(skb, 0, h.raw + macoff, snaplen); | 
|---|
| 2391 |  | 
|---|
| 2392 | /* Always timestamp; prefer an existing software timestamp taken | 
|---|
| 2393 | * closer to the time of capture. | 
|---|
| 2394 | */ | 
|---|
| 2395 | ts_status = tpacket_get_timestamp(skb, &ts, | 
|---|
| 2396 | READ_ONCE(po->tp_tstamp) | | 
|---|
| 2397 | SOF_TIMESTAMPING_SOFTWARE); | 
|---|
| 2398 | if (!ts_status) | 
|---|
| 2399 | ktime_get_real_ts64(&ts); | 
|---|
| 2400 |  | 
|---|
| 2401 | status |= ts_status; | 
|---|
| 2402 |  | 
|---|
| 2403 | switch (po->tp_version) { | 
|---|
| 2404 | case TPACKET_V1: | 
|---|
| 2405 | h.h1->tp_len = skb->len; | 
|---|
| 2406 | h.h1->tp_snaplen = snaplen; | 
|---|
| 2407 | h.h1->tp_mac = macoff; | 
|---|
| 2408 | h.h1->tp_net = netoff; | 
|---|
| 2409 | h.h1->tp_sec = ts.tv_sec; | 
|---|
| 2410 | h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC; | 
|---|
| 2411 | hdrlen = sizeof(*h.h1); | 
|---|
| 2412 | break; | 
|---|
| 2413 | case TPACKET_V2: | 
|---|
| 2414 | h.h2->tp_len = skb->len; | 
|---|
| 2415 | h.h2->tp_snaplen = snaplen; | 
|---|
| 2416 | h.h2->tp_mac = macoff; | 
|---|
| 2417 | h.h2->tp_net = netoff; | 
|---|
| 2418 | h.h2->tp_sec = ts.tv_sec; | 
|---|
| 2419 | h.h2->tp_nsec = ts.tv_nsec; | 
|---|
| 2420 | if (skb_vlan_tag_present(skb)) { | 
|---|
| 2421 | h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); | 
|---|
| 2422 | h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); | 
|---|
| 2423 | status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; | 
|---|
| 2424 | } else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) { | 
|---|
| 2425 | h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev); | 
|---|
| 2426 | h.h2->tp_vlan_tpid = ntohs(skb->protocol); | 
|---|
| 2427 | status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; | 
|---|
| 2428 | } else { | 
|---|
| 2429 | h.h2->tp_vlan_tci = 0; | 
|---|
| 2430 | h.h2->tp_vlan_tpid = 0; | 
|---|
| 2431 | } | 
|---|
| 2432 | memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding)); | 
|---|
| 2433 | hdrlen = sizeof(*h.h2); | 
|---|
| 2434 | break; | 
|---|
| 2435 | case TPACKET_V3: | 
|---|
| 2436 | /* tp_nxt_offset,vlan are already populated above. | 
|---|
| 2437 | * So DONT clear those fields here | 
|---|
| 2438 | */ | 
|---|
| 2439 | h.h3->tp_status |= status; | 
|---|
| 2440 | h.h3->tp_len = skb->len; | 
|---|
| 2441 | h.h3->tp_snaplen = snaplen; | 
|---|
| 2442 | h.h3->tp_mac = macoff; | 
|---|
| 2443 | h.h3->tp_net = netoff; | 
|---|
| 2444 | h.h3->tp_sec  = ts.tv_sec; | 
|---|
| 2445 | h.h3->tp_nsec = ts.tv_nsec; | 
|---|
| 2446 | memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding)); | 
|---|
| 2447 | hdrlen = sizeof(*h.h3); | 
|---|
| 2448 | break; | 
|---|
| 2449 | default: | 
|---|
| 2450 | BUG(); | 
|---|
| 2451 | } | 
|---|
| 2452 |  | 
|---|
| 2453 | sll = h.raw + TPACKET_ALIGN(hdrlen); | 
|---|
| 2454 | sll->sll_halen = dev_parse_header(skb, sll->sll_addr); | 
|---|
| 2455 | sll->sll_family = AF_PACKET; | 
|---|
| 2456 | sll->sll_hatype = dev->type; | 
|---|
| 2457 | sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ? | 
|---|
| 2458 | vlan_get_protocol_dgram(skb) : skb->protocol; | 
|---|
| 2459 | sll->sll_pkttype = skb->pkt_type; | 
|---|
| 2460 | if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) | 
|---|
| 2461 | sll->sll_ifindex = orig_dev->ifindex; | 
|---|
| 2462 | else | 
|---|
| 2463 | sll->sll_ifindex = dev->ifindex; | 
|---|
| 2464 |  | 
|---|
| 2465 | smp_mb(); | 
|---|
| 2466 |  | 
|---|
| 2467 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 | 
|---|
| 2468 | if (po->tp_version <= TPACKET_V2) { | 
|---|
| 2469 | u8 *start, *end; | 
|---|
| 2470 |  | 
|---|
| 2471 | end = (u8 *) PAGE_ALIGN((unsigned long) h.raw + | 
|---|
| 2472 | macoff + snaplen); | 
|---|
| 2473 |  | 
|---|
| 2474 | for (start = h.raw; start < end; start += PAGE_SIZE) | 
|---|
| 2475 | flush_dcache_page(pgv_to_page(start)); | 
|---|
| 2476 | } | 
|---|
| 2477 | smp_wmb(); | 
|---|
| 2478 | #endif | 
|---|
| 2479 |  | 
|---|
| 2480 | if (po->tp_version <= TPACKET_V2) { | 
|---|
| 2481 | spin_lock(&sk->sk_receive_queue.lock); | 
|---|
| 2482 | __packet_set_status(po, h.raw, status); | 
|---|
| 2483 | __clear_bit(slot_id, po->rx_ring.rx_owner_map); | 
|---|
| 2484 | spin_unlock(&sk->sk_receive_queue.lock); | 
|---|
| 2485 | sk->sk_data_ready(sk); | 
|---|
| 2486 | } else if (po->tp_version == TPACKET_V3) { | 
|---|
| 2487 | prb_clear_blk_fill_status(&po->rx_ring); | 
|---|
| 2488 | } | 
|---|
| 2489 |  | 
|---|
| 2490 | drop_n_restore: | 
|---|
| 2491 | if (skb_head != skb->data && skb_shared(skb)) { | 
|---|
| 2492 | skb->data = skb_head; | 
|---|
| 2493 | skb->len = skb_len; | 
|---|
| 2494 | } | 
|---|
| 2495 | drop: | 
|---|
| 2496 | sk_skb_reason_drop(sk, skb, drop_reason); | 
|---|
| 2497 | return 0; | 
|---|
| 2498 |  | 
|---|
| 2499 | drop_n_account: | 
|---|
| 2500 | spin_unlock(&sk->sk_receive_queue.lock); | 
|---|
| 2501 | atomic_inc(&po->tp_drops); | 
|---|
| 2502 | drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR; | 
|---|
| 2503 |  | 
|---|
| 2504 | sk->sk_data_ready(sk); | 
|---|
| 2505 | sk_skb_reason_drop(sk, copy_skb, drop_reason); | 
|---|
| 2506 | goto drop_n_restore; | 
|---|
| 2507 | } | 
|---|
| 2508 |  | 
|---|
| 2509 | static void tpacket_destruct_skb(struct sk_buff *skb) | 
|---|
| 2510 | { | 
|---|
| 2511 | struct packet_sock *po = pkt_sk(skb->sk); | 
|---|
| 2512 |  | 
|---|
| 2513 | if (likely(po->tx_ring.pg_vec)) { | 
|---|
| 2514 | void *ph; | 
|---|
| 2515 | __u32 ts; | 
|---|
| 2516 |  | 
|---|
| 2517 | ph = skb_zcopy_get_nouarg(skb); | 
|---|
| 2518 | packet_dec_pending(&po->tx_ring); | 
|---|
| 2519 |  | 
|---|
| 2520 | ts = __packet_set_timestamp(po, ph, skb); | 
|---|
| 2521 | __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); | 
|---|
| 2522 |  | 
|---|
| 2523 | complete(&po->skb_completion); | 
|---|
| 2524 | } | 
|---|
| 2525 |  | 
|---|
| 2526 | sock_wfree(skb); | 
|---|
| 2527 | } | 
|---|
| 2528 |  | 
|---|
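| | /* For VIRTIO_NET_HDR_F_NEEDS_CSUM packets, hdr_len must reach at least | 
|---|
| | * csum_start + csum_offset + 2: the extra 2 bytes are the 16-bit | 
|---|
| | * checksum field that will be written at that offset. | 
|---|
| | */ | 
|---|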
| 2529 | static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len) | 
|---|
| 2530 | { | 
|---|
| 2531 | if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && | 
|---|
| 2532 | (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + | 
|---|
| 2533 | __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 > | 
|---|
| 2534 | __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len))) | 
|---|
| 2535 | vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(), | 
|---|
| 2536 | __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) + | 
|---|
| 2537 | __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2); | 
|---|
| 2538 |  | 
|---|
| 2539 | if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len) | 
|---|
| 2540 | return -EINVAL; | 
|---|
| 2541 |  | 
|---|
| 2542 | return 0; | 
|---|
| 2543 | } | 
|---|
| 2544 |  | 
|---|
| 2545 | static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len, | 
|---|
| 2546 | struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz) | 
|---|
| 2547 | { | 
|---|
| 2548 | int ret; | 
|---|
| 2549 |  | 
|---|
| 2550 | if (*len < vnet_hdr_sz) | 
|---|
| 2551 | return -EINVAL; | 
|---|
| 2552 | *len -= vnet_hdr_sz; | 
|---|
| 2553 |  | 
|---|
| 2554 | if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter)) | 
|---|
| 2555 | return -EFAULT; | 
|---|
| 2556 |  | 
|---|
| 2557 | ret = __packet_snd_vnet_parse(vnet_hdr, *len); | 
|---|
| 2558 | if (ret) | 
|---|
| 2559 | return ret; | 
|---|
| 2560 |  | 
|---|
| 2561 | /* move iter to point to the start of mac header */ | 
|---|
| 2562 | if (vnet_hdr_sz != sizeof(struct virtio_net_hdr)) | 
|---|
| 2563 | iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr)); | 
|---|
| 2564 |  | 
|---|
| 2565 | return 0; | 
|---|
| 2566 | } | 
|---|
| 2567 |  | 
|---|
| 2568 | static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | 
|---|
| 2569 | void *frame, struct net_device *dev, void *data, int tp_len, | 
|---|
| 2570 | __be16 proto, unsigned char *addr, int hlen, int copylen, | 
|---|
| 2571 | const struct sockcm_cookie *sockc) | 
|---|
| 2572 | { | 
|---|
| 2573 | union tpacket_uhdr ph; | 
|---|
| 2574 | int to_write, offset, len, nr_frags, len_max; | 
|---|
| 2575 | struct socket *sock = po->sk.sk_socket; | 
|---|
| 2576 | struct page *page; | 
|---|
| 2577 | int err; | 
|---|
| 2578 |  | 
|---|
| 2579 | ph.raw = frame; | 
|---|
| 2580 |  | 
|---|
| 2581 | skb->protocol = proto; | 
|---|
| 2582 | skb->dev = dev; | 
|---|
| 2583 | skb->priority = sockc->priority; | 
|---|
| 2584 | skb->mark = sockc->mark; | 
|---|
| 2585 | skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid); | 
|---|
| 2586 | skb_setup_tx_timestamp(skb, sockc); | 
|---|
| 2587 | skb_zcopy_set_nouarg(skb, ph.raw); | 
|---|
| 2588 |  | 
|---|
| 2589 | skb_reserve(skb, hlen); | 
|---|
| 2590 | skb_reset_network_header(skb); | 
|---|
| 2591 |  | 
|---|
| 2592 | to_write = tp_len; | 
|---|
| 2593 |  | 
|---|
| 2594 | if (sock->type == SOCK_DGRAM) { | 
|---|
| 2595 | err = dev_hard_header(skb, dev, ntohs(proto), addr, | 
|---|
| 2596 | NULL, tp_len); | 
|---|
| 2597 | if (unlikely(err < 0)) | 
|---|
| 2598 | return -EINVAL; | 
|---|
| 2599 | } else if (copylen) { | 
|---|
| 2600 | int hdrlen = min_t(int, copylen, tp_len); | 
|---|
| 2601 |  | 
|---|
| 2602 | skb_push(skb, dev->hard_header_len); | 
|---|
| 2603 | skb_put(skb, copylen - dev->hard_header_len); | 
|---|
| 2604 | err = skb_store_bits(skb, 0, data, hdrlen); | 
|---|
| 2605 | if (unlikely(err)) | 
|---|
| 2606 | return err; | 
|---|
| 2607 | if (!dev_validate_header(dev, skb->data, hdrlen)) | 
|---|
| 2608 | return -EINVAL; | 
|---|
| 2609 |  | 
|---|
| 2610 | data += hdrlen; | 
|---|
| 2611 | to_write -= hdrlen; | 
|---|
| 2612 | } | 
|---|
| 2613 |  | 
|---|
| 2614 | offset = offset_in_page(data); | 
|---|
| 2615 | len_max = PAGE_SIZE - offset; | 
|---|
| 2616 | len = ((to_write > len_max) ? len_max : to_write); | 
|---|
| 2617 |  | 
|---|
| 2618 | skb->data_len = to_write; | 
|---|
| 2619 | skb->len += to_write; | 
|---|
| 2620 | skb->truesize += to_write; | 
|---|
| 2621 | refcount_add(to_write, &po->sk.sk_wmem_alloc); | 
|---|
| 2622 |  | 
|---|
| 2623 | while (likely(to_write)) { | 
|---|
| 2624 | nr_frags = skb_shinfo(skb)->nr_frags; | 
|---|
| 2625 |  | 
|---|
| 2626 | if (unlikely(nr_frags >= MAX_SKB_FRAGS)) { | 
|---|
| 2627 | pr_err("Packet exceed the number of skb frags(%u)\n", | 
|---|
| 2628 | (unsigned int)MAX_SKB_FRAGS); | 
|---|
| 2629 | return -EFAULT; | 
|---|
| 2630 | } | 
|---|
| 2631 |  | 
|---|
| 2632 | page = pgv_to_page(data); | 
|---|
| 2633 | data += len; | 
|---|
| 2634 | flush_dcache_page(page); | 
|---|
| 2635 | get_page(page); | 
|---|
| 2636 | skb_fill_page_desc(skb, nr_frags, page, offset, len); | 
|---|
| 2637 | to_write -= len; | 
|---|
| 2638 | offset = 0; | 
|---|
| 2639 | len_max = PAGE_SIZE; | 
|---|
| 2640 | len = ((to_write > len_max) ? len_max : to_write); | 
|---|
| 2641 | } | 
|---|
| 2642 |  | 
|---|
| 2643 | packet_parse_headers(skb, sock); | 
|---|
| 2644 |  | 
|---|
| 2645 | return tp_len; | 
|---|
| 2646 | } | 
|---|
| 2647 |  | 
|---|
| 2648 | static int tpacket_parse_header(struct packet_sock *po, void *frame, | 
|---|
| 2649 | int size_max, void **data) | 
|---|
| 2650 | { | 
|---|
| 2651 | union tpacket_uhdr ph; | 
|---|
| 2652 | int tp_len, off; | 
|---|
| 2653 |  | 
|---|
| 2654 | ph.raw = frame; | 
|---|
| 2655 |  | 
|---|
| 2656 | switch (po->tp_version) { | 
|---|
| 2657 | case TPACKET_V3: | 
|---|
| 2658 | if (ph.h3->tp_next_offset != 0) { | 
|---|
| 2659 | pr_warn_once( "variable sized slot not supported"); | 
|---|
| 2660 | return -EINVAL; | 
|---|
| 2661 | } | 
|---|
| 2662 | tp_len = ph.h3->tp_len; | 
|---|
| 2663 | break; | 
|---|
| 2664 | case TPACKET_V2: | 
|---|
| 2665 | tp_len = ph.h2->tp_len; | 
|---|
| 2666 | break; | 
|---|
| 2667 | default: | 
|---|
| 2668 | tp_len = ph.h1->tp_len; | 
|---|
| 2669 | break; | 
|---|
| 2670 | } | 
|---|
| 2671 | if (unlikely(tp_len > size_max)) { | 
|---|
| 2672 | pr_err( "packet size is too long (%d > %d)\n", tp_len, size_max); | 
|---|
| 2673 | return -EMSGSIZE; | 
|---|
| 2674 | } | 
|---|
| 2675 |  | 
|---|
| 2676 | if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) { | 
|---|
| 2677 | int off_min, off_max; | 
|---|
| 2678 |  | 
|---|
| 2679 | off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); | 
|---|
| 2680 | off_max = po->tx_ring.frame_size - tp_len; | 
|---|
| 2681 | if (po->sk.sk_type == SOCK_DGRAM) { | 
|---|
| 2682 | switch (po->tp_version) { | 
|---|
| 2683 | case TPACKET_V3: | 
|---|
| 2684 | off = ph.h3->tp_net; | 
|---|
| 2685 | break; | 
|---|
| 2686 | case TPACKET_V2: | 
|---|
| 2687 | off = ph.h2->tp_net; | 
|---|
| 2688 | break; | 
|---|
| 2689 | default: | 
|---|
| 2690 | off = ph.h1->tp_net; | 
|---|
| 2691 | break; | 
|---|
| 2692 | } | 
|---|
| 2693 | } else { | 
|---|
| 2694 | switch (po->tp_version) { | 
|---|
| 2695 | case TPACKET_V3: | 
|---|
| 2696 | off = ph.h3->tp_mac; | 
|---|
| 2697 | break; | 
|---|
| 2698 | case TPACKET_V2: | 
|---|
| 2699 | off = ph.h2->tp_mac; | 
|---|
| 2700 | break; | 
|---|
| 2701 | default: | 
|---|
| 2702 | off = ph.h1->tp_mac; | 
|---|
| 2703 | break; | 
|---|
| 2704 | } | 
|---|
| 2705 | } | 
|---|
| 2706 | if (unlikely((off < off_min) || (off_max < off))) | 
|---|
| 2707 | return -EINVAL; | 
|---|
| 2708 | } else { | 
|---|
| 2709 | off = po->tp_hdrlen - sizeof(struct sockaddr_ll); | 
|---|
| 2710 | } | 
|---|
| 2711 |  | 
|---|
| 2712 | *data = frame + off; | 
|---|
| 2713 | return tp_len; | 
|---|
| 2714 | } | 
|---|
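/* Editorial sketch (not part of the original source): the off_min/off_max
 * branch above only runs when PACKET_TX_HAS_OFF is enabled, in which case
 * the sender stores its payload offset in tp_mac (tp_net for SOCK_DGRAM).
 * A minimal way to enable it, assuming an already-open AF_PACKET fd:
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>

static int enable_tx_has_off(int fd)
{
	int one = 1;

	/* after this, each TX frame's tp_mac/tp_net is honoured and
	 * validated against [off_min, off_max] as computed above */
	return setsockopt(fd, SOL_PACKET, PACKET_TX_HAS_OFF,
			  &one, sizeof(one));
}
#endif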
| 2715 |  | 
|---|
| 2716 | static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | 
|---|
| 2717 | { | 
|---|
| 2718 | struct sk_buff *skb = NULL; | 
|---|
| 2719 | struct net_device *dev; | 
|---|
| 2720 | struct virtio_net_hdr *vnet_hdr = NULL; | 
|---|
| 2721 | struct sockcm_cookie sockc; | 
|---|
| 2722 | __be16 proto; | 
|---|
| 2723 | int err, reserve = 0; | 
|---|
| 2724 | void *ph; | 
|---|
| 2725 | DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); | 
|---|
| 2726 | bool need_wait = !(msg->msg_flags & MSG_DONTWAIT); | 
|---|
| 2727 | int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); | 
|---|
| 2728 | unsigned char *addr = NULL; | 
|---|
| 2729 | int tp_len, size_max; | 
|---|
| 2730 | void *data; | 
|---|
| 2731 | int len_sum = 0; | 
|---|
| 2732 | int status = TP_STATUS_AVAILABLE; | 
|---|
| 2733 | int hlen, tlen, copylen = 0; | 
|---|
| 2734 | long timeo; | 
|---|
| 2735 |  | 
|---|
| 2736 | mutex_lock(lock: &po->pg_vec_lock); | 
|---|
| 2737 |  | 
|---|
| 2738 | /* packet_sendmsg() check on tx_ring.pg_vec was lockless, | 
|---|
| 2739 | * so we need to confirm it under the protection of pg_vec_lock. | 
|---|
| 2740 | */ | 
|---|
| 2741 | if (unlikely(!po->tx_ring.pg_vec)) { | 
|---|
| 2742 | err = -EBUSY; | 
|---|
| 2743 | goto out; | 
|---|
| 2744 | } | 
|---|
| 2745 | if (likely(saddr == NULL)) { | 
|---|
| 2746 | dev	= packet_cached_dev_get(po); | 
|---|
| 2747 | proto	= READ_ONCE(po->num); | 
|---|
| 2748 | } else { | 
|---|
| 2749 | err = -EINVAL; | 
|---|
| 2750 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) | 
|---|
| 2751 | goto out; | 
|---|
| 2752 | if (msg->msg_namelen < (saddr->sll_halen | 
|---|
| 2753 | + offsetof(struct sockaddr_ll, | 
|---|
| 2754 | sll_addr))) | 
|---|
| 2755 | goto out; | 
|---|
| 2756 | proto	= saddr->sll_protocol; | 
|---|
| 2757 | dev = dev_get_by_index(net: sock_net(sk: &po->sk), ifindex: saddr->sll_ifindex); | 
|---|
| 2758 | if (po->sk.sk_socket->type == SOCK_DGRAM) { | 
|---|
| 2759 | if (dev && msg->msg_namelen < dev->addr_len + | 
|---|
| 2760 | offsetof(struct sockaddr_ll, sll_addr)) | 
|---|
| 2761 | goto out_put; | 
|---|
| 2762 | addr = saddr->sll_addr; | 
|---|
| 2763 | } | 
|---|
| 2764 | } | 
|---|
| 2765 |  | 
|---|
| 2766 | err = -ENXIO; | 
|---|
| 2767 | if (unlikely(dev == NULL)) | 
|---|
| 2768 | goto out; | 
|---|
| 2769 | err = -ENETDOWN; | 
|---|
| 2770 | if (unlikely(!(dev->flags & IFF_UP))) | 
|---|
| 2771 | goto out_put; | 
|---|
| 2772 |  | 
|---|
| 2773 | sockcm_init(sockc: &sockc, sk: &po->sk); | 
|---|
| 2774 | if (msg->msg_controllen) { | 
|---|
| 2775 | err = sock_cmsg_send(sk: &po->sk, msg, sockc: &sockc); | 
|---|
| 2776 | if (unlikely(err)) | 
|---|
| 2777 | goto out_put; | 
|---|
| 2778 | } | 
|---|
| 2779 |  | 
|---|
| 2780 | if (po->sk.sk_socket->type == SOCK_RAW) | 
|---|
| 2781 | reserve = dev->hard_header_len; | 
|---|
| 2782 | size_max = po->tx_ring.frame_size | 
|---|
| 2783 | - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); | 
|---|
| 2784 |  | 
|---|
| 2785 | if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz) | 
|---|
| 2786 | size_max = dev->mtu + reserve + VLAN_HLEN; | 
|---|
| 2787 |  | 
|---|
| 2788 | timeo = sock_sndtimeo(sk: &po->sk, noblock: msg->msg_flags & MSG_DONTWAIT); | 
|---|
| 2789 | reinit_completion(x: &po->skb_completion); | 
|---|
| 2790 |  | 
|---|
| 2791 | do { | 
|---|
| 2792 | ph = packet_current_frame(po, rb: &po->tx_ring, | 
|---|
| 2793 | TP_STATUS_SEND_REQUEST); | 
|---|
| 2794 | if (unlikely(ph == NULL)) { | 
|---|
| 2795 | /* Note: packet_read_pending() might be slow if we | 
|---|
| 2796 | * have to call it, as it is a per-CPU variable; but on | 
|---|
| 2797 | * the fast path we don't call it at all - only when ph | 
|---|
| 2798 | * is NULL do we need to check the pending_refcnt. | 
|---|
| 2799 | */ | 
|---|
| 2800 | if (need_wait && packet_read_pending(rb: &po->tx_ring)) { | 
|---|
| 2801 | timeo = wait_for_completion_interruptible_timeout(x: &po->skb_completion, timeout: timeo); | 
|---|
| 2802 | if (timeo <= 0) { | 
|---|
| 2803 | err = !timeo ? -ETIMEDOUT : -ERESTARTSYS; | 
|---|
| 2804 | goto out_put; | 
|---|
| 2805 | } | 
|---|
| 2806 | /* check for additional frames */ | 
|---|
| 2807 | continue; | 
|---|
| 2808 | } else | 
|---|
| 2809 | break; | 
|---|
| 2810 | } | 
|---|
| 2811 |  | 
|---|
| 2812 | skb = NULL; | 
|---|
| 2813 | tp_len = tpacket_parse_header(po, frame: ph, size_max, data: &data); | 
|---|
| 2814 | if (tp_len < 0) | 
|---|
| 2815 | goto tpacket_error; | 
|---|
| 2816 |  | 
|---|
| 2817 | status = TP_STATUS_SEND_REQUEST; | 
|---|
| 2818 | hlen = LL_RESERVED_SPACE(dev); | 
|---|
| 2819 | tlen = dev->needed_tailroom; | 
|---|
| 2820 | if (vnet_hdr_sz) { | 
|---|
| 2821 | vnet_hdr = data; | 
|---|
| 2822 | data += vnet_hdr_sz; | 
|---|
| 2823 | tp_len -= vnet_hdr_sz; | 
|---|
| 2824 | if (tp_len < 0 || | 
|---|
| 2825 | __packet_snd_vnet_parse(vnet_hdr, len: tp_len)) { | 
|---|
| 2826 | tp_len = -EINVAL; | 
|---|
| 2827 | goto tpacket_error; | 
|---|
| 2828 | } | 
|---|
| 2829 | copylen = __virtio16_to_cpu(vio_le(), | 
|---|
| 2830 | val: vnet_hdr->hdr_len); | 
|---|
| 2831 | } | 
|---|
| 2832 | copylen = max_t(int, copylen, dev->hard_header_len); | 
|---|
| 2833 | skb = sock_alloc_send_skb(sk: &po->sk, | 
|---|
| 2834 | size: hlen + tlen + sizeof(struct sockaddr_ll) + | 
|---|
| 2835 | (copylen - dev->hard_header_len), | 
|---|
| 2836 | noblock: !need_wait, errcode: &err); | 
|---|
| 2837 |  | 
|---|
| 2838 | if (unlikely(skb == NULL)) { | 
|---|
| 2839 | /* we assume the socket was initially writeable ... */ | 
|---|
| 2840 | if (likely(len_sum > 0)) | 
|---|
| 2841 | err = len_sum; | 
|---|
| 2842 | goto out_status; | 
|---|
| 2843 | } | 
|---|
| 2844 | tp_len = tpacket_fill_skb(po, skb, frame: ph, dev, data, tp_len, proto, | 
|---|
| 2845 | addr, hlen, copylen, sockc: &sockc); | 
|---|
| 2846 | if (likely(tp_len >= 0) && | 
|---|
| 2847 | tp_len > dev->mtu + reserve && | 
|---|
| 2848 | !vnet_hdr_sz && | 
|---|
| 2849 | !packet_extra_vlan_len_allowed(dev, skb)) | 
|---|
| 2850 | tp_len = -EMSGSIZE; | 
|---|
| 2851 |  | 
|---|
| 2852 | if (unlikely(tp_len < 0)) { | 
|---|
| 2853 | tpacket_error: | 
|---|
| 2854 | if (packet_sock_flag(po, flag: PACKET_SOCK_TP_LOSS)) { | 
|---|
| 2855 | __packet_set_status(po, frame: ph, | 
|---|
| 2856 | TP_STATUS_AVAILABLE); | 
|---|
| 2857 | packet_increment_head(buff: &po->tx_ring); | 
|---|
| 2858 | kfree_skb(skb); | 
|---|
| 2859 | continue; | 
|---|
| 2860 | } else { | 
|---|
| 2861 | status = TP_STATUS_WRONG_FORMAT; | 
|---|
| 2862 | err = tp_len; | 
|---|
| 2863 | goto out_status; | 
|---|
| 2864 | } | 
|---|
| 2865 | } | 
|---|
| 2866 |  | 
|---|
| 2867 | if (vnet_hdr_sz) { | 
|---|
| 2868 | if (virtio_net_hdr_to_skb(skb, hdr: vnet_hdr, vio_le())) { | 
|---|
| 2869 | tp_len = -EINVAL; | 
|---|
| 2870 | goto tpacket_error; | 
|---|
| 2871 | } | 
|---|
| 2872 | virtio_net_hdr_set_proto(skb, hdr: vnet_hdr); | 
|---|
| 2873 | } | 
|---|
| 2874 |  | 
|---|
| 2875 | skb->destructor = tpacket_destruct_skb; | 
|---|
| 2876 | __packet_set_status(po, frame: ph, TP_STATUS_SENDING); | 
|---|
| 2877 | packet_inc_pending(rb: &po->tx_ring); | 
|---|
| 2878 |  | 
|---|
| 2879 | status = TP_STATUS_SEND_REQUEST; | 
|---|
| 2880 | err = packet_xmit(po, skb); | 
|---|
| 2881 | if (unlikely(err != 0)) { | 
|---|
| 2882 | if (err > 0) | 
|---|
| 2883 | err = net_xmit_errno(err); | 
|---|
| 2884 | if (err && __packet_get_status(po, frame: ph) == | 
|---|
| 2885 | TP_STATUS_AVAILABLE) { | 
|---|
| 2886 | /* skb was destructed already */ | 
|---|
| 2887 | skb = NULL; | 
|---|
| 2888 | goto out_status; | 
|---|
| 2889 | } | 
|---|
| 2890 | /* | 
|---|
| 2891 | * skb was dropped but not destructed yet; | 
|---|
| 2892 | * let's treat it like congestion or err < 0 | 
|---|
| 2893 | */ | 
|---|
| 2894 | err = 0; | 
|---|
| 2895 | } | 
|---|
| 2896 | packet_increment_head(buff: &po->tx_ring); | 
|---|
| 2897 | len_sum += tp_len; | 
|---|
| 2898 | } while (1); | 
|---|
| 2899 |  | 
|---|
| 2900 | err = len_sum; | 
|---|
| 2901 | goto out_put; | 
|---|
| 2902 |  | 
|---|
| 2903 | out_status: | 
|---|
| 2904 | __packet_set_status(po, frame: ph, status); | 
|---|
| 2905 | kfree_skb(skb); | 
|---|
| 2906 | out_put: | 
|---|
| 2907 | dev_put(dev); | 
|---|
| 2908 | out: | 
|---|
| 2909 | mutex_unlock(lock: &po->pg_vec_lock); | 
|---|
| 2910 | return err; | 
|---|
| 2911 | } | 
|---|
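/* Editorial sketch (not part of the original source): tpacket_snd() loops
 * until no frame in the TX ring is marked TP_STATUS_SEND_REQUEST, so user
 * space drives it with a single zero-length send() after filling frames:
 */
#if 0
#include <sys/socket.h>

/* With MSG_DONTWAIT the call returns instead of sleeping in
 * wait_for_completion_interruptible_timeout() above. */
static int kick_tx_ring(int fd, int nonblock)
{
	return send(fd, NULL, 0, nonblock ? MSG_DONTWAIT : 0);
}
#endif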
| 2912 |  | 
|---|
| 2913 | static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad, | 
|---|
| 2914 | size_t reserve, size_t len, | 
|---|
| 2915 | size_t linear, int noblock, | 
|---|
| 2916 | int *err) | 
|---|
| 2917 | { | 
|---|
| 2918 | struct sk_buff *skb; | 
|---|
| 2919 |  | 
|---|
| 2920 | /* Under a page?  Don't bother with paged skb. */ | 
|---|
| 2921 | if (prepad + len < PAGE_SIZE || !linear) | 
|---|
| 2922 | linear = len; | 
|---|
| 2923 |  | 
|---|
| 2924 | if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) | 
|---|
| 2925 | linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); | 
|---|
| 2926 | skb = sock_alloc_send_pskb(sk, header_len: prepad + linear, data_len: len - linear, noblock, | 
|---|
| 2927 | errcode: err, PAGE_ALLOC_COSTLY_ORDER); | 
|---|
| 2928 | if (!skb) | 
|---|
| 2929 | return NULL; | 
|---|
| 2930 |  | 
|---|
| 2931 | skb_reserve(skb, len: reserve); | 
|---|
| 2932 | skb_put(skb, len: linear); | 
|---|
| 2933 | skb->data_len = len - linear; | 
|---|
| 2934 | skb->len += len - linear; | 
|---|
| 2935 |  | 
|---|
| 2936 | return skb; | 
|---|
| 2937 | } | 
|---|
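/* Editorial worked example (not in the original source): with
 * PAGE_SIZE == 4096 and the default MAX_SKB_FRAGS of 17, a 64 KiB send
 * whose virtio header advertises hdr_len == 1500 gets linear == 1500
 * bytes of skb head and the remaining 64036 bytes as page frags; the
 * clamp above only bites once the paged part would exceed
 * 17 * (4096 << PAGE_ALLOC_COSTLY_ORDER) == 557056 bytes.
 */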
| 2938 |  | 
|---|
| 2939 | static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | 
|---|
| 2940 | { | 
|---|
| 2941 | struct sock *sk = sock->sk; | 
|---|
| 2942 | DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name); | 
|---|
| 2943 | struct sk_buff *skb; | 
|---|
| 2944 | struct net_device *dev; | 
|---|
| 2945 | __be16 proto; | 
|---|
| 2946 | unsigned char *addr = NULL; | 
|---|
| 2947 | int err, reserve = 0; | 
|---|
| 2948 | struct sockcm_cookie sockc; | 
|---|
| 2949 | struct virtio_net_hdr vnet_hdr = { 0 }; | 
|---|
| 2950 | int offset = 0; | 
|---|
| 2951 | struct packet_sock *po = pkt_sk(sk); | 
|---|
| 2952 | int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz); | 
|---|
| 2953 | int hlen, tlen, linear; | 
|---|
| 2954 | int extra_len = 0; | 
|---|
| 2955 |  | 
|---|
| 2956 | /* | 
|---|
| 2957 | *	Get and verify the address. | 
|---|
| 2958 | */ | 
|---|
| 2959 |  | 
|---|
| 2960 | if (likely(saddr == NULL)) { | 
|---|
| 2961 | dev	= packet_cached_dev_get(po); | 
|---|
| 2962 | proto	= READ_ONCE(po->num); | 
|---|
| 2963 | } else { | 
|---|
| 2964 | err = -EINVAL; | 
|---|
| 2965 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) | 
|---|
| 2966 | goto out; | 
|---|
| 2967 | if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) | 
|---|
| 2968 | goto out; | 
|---|
| 2969 | proto	= saddr->sll_protocol; | 
|---|
| 2970 | dev = dev_get_by_index(net: sock_net(sk), ifindex: saddr->sll_ifindex); | 
|---|
| 2971 | if (sock->type == SOCK_DGRAM) { | 
|---|
| 2972 | if (dev && msg->msg_namelen < dev->addr_len + | 
|---|
| 2973 | offsetof(struct sockaddr_ll, sll_addr)) | 
|---|
| 2974 | goto out_unlock; | 
|---|
| 2975 | addr = saddr->sll_addr; | 
|---|
| 2976 | } | 
|---|
| 2977 | } | 
|---|
| 2978 |  | 
|---|
| 2979 | err = -ENXIO; | 
|---|
| 2980 | if (unlikely(dev == NULL)) | 
|---|
| 2981 | goto out_unlock; | 
|---|
| 2982 | err = -ENETDOWN; | 
|---|
| 2983 | if (unlikely(!(dev->flags & IFF_UP))) | 
|---|
| 2984 | goto out_unlock; | 
|---|
| 2985 |  | 
|---|
| 2986 | sockcm_init(sockc: &sockc, sk); | 
|---|
| 2987 | if (msg->msg_controllen) { | 
|---|
| 2988 | err = sock_cmsg_send(sk, msg, sockc: &sockc); | 
|---|
| 2989 | if (unlikely(err)) | 
|---|
| 2990 | goto out_unlock; | 
|---|
| 2991 | } | 
|---|
| 2992 |  | 
|---|
| 2993 | if (sock->type == SOCK_RAW) | 
|---|
| 2994 | reserve = dev->hard_header_len; | 
|---|
| 2995 | if (vnet_hdr_sz) { | 
|---|
| 2996 | err = packet_snd_vnet_parse(msg, len: &len, vnet_hdr: &vnet_hdr, vnet_hdr_sz); | 
|---|
| 2997 | if (err) | 
|---|
| 2998 | goto out_unlock; | 
|---|
| 2999 | } | 
|---|
| 3000 |  | 
|---|
| 3001 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { | 
|---|
| 3002 | if (!netif_supports_nofcs(dev)) { | 
|---|
| 3003 | err = -EPROTONOSUPPORT; | 
|---|
| 3004 | goto out_unlock; | 
|---|
| 3005 | } | 
|---|
| 3006 | extra_len = 4; /* We're doing our own CRC */ | 
|---|
| 3007 | } | 
|---|
| 3008 |  | 
|---|
| 3009 | err = -EMSGSIZE; | 
|---|
| 3010 | if (!vnet_hdr.gso_type && | 
|---|
| 3011 | (len > dev->mtu + reserve + VLAN_HLEN + extra_len)) | 
|---|
| 3012 | goto out_unlock; | 
|---|
| 3013 |  | 
|---|
| 3014 | err = -ENOBUFS; | 
|---|
| 3015 | hlen = LL_RESERVED_SPACE(dev); | 
|---|
| 3016 | tlen = dev->needed_tailroom; | 
|---|
| 3017 | linear = __virtio16_to_cpu(vio_le(), val: vnet_hdr.hdr_len); | 
|---|
| 3018 | linear = max(linear, min_t(int, len, dev->hard_header_len)); | 
|---|
| 3019 | skb = packet_alloc_skb(sk, prepad: hlen + tlen, reserve: hlen, len, linear, | 
|---|
| 3020 | noblock: msg->msg_flags & MSG_DONTWAIT, err: &err); | 
|---|
| 3021 | if (skb == NULL) | 
|---|
| 3022 | goto out_unlock; | 
|---|
| 3023 |  | 
|---|
| 3024 | skb_reset_network_header(skb); | 
|---|
| 3025 |  | 
|---|
| 3026 | err = -EINVAL; | 
|---|
| 3027 | if (sock->type == SOCK_DGRAM) { | 
|---|
| 3028 | offset = dev_hard_header(skb, dev, ntohs(proto), daddr: addr, NULL, len); | 
|---|
| 3029 | if (unlikely(offset < 0)) | 
|---|
| 3030 | goto out_free; | 
|---|
| 3031 | } else if (reserve) { | 
|---|
| 3032 | skb_reserve(skb, len: -reserve); | 
|---|
| 3033 | if (len < reserve + sizeof(struct ipv6hdr) && | 
|---|
| 3034 | dev->min_header_len != dev->hard_header_len) | 
|---|
| 3035 | skb_reset_network_header(skb); | 
|---|
| 3036 | } | 
|---|
| 3037 |  | 
|---|
| 3038 | /* Returns -EFAULT on error */ | 
|---|
| 3039 | err = skb_copy_datagram_from_iter(skb, offset, from: &msg->msg_iter, len); | 
|---|
| 3040 | if (err) | 
|---|
| 3041 | goto out_free; | 
|---|
| 3042 |  | 
|---|
| 3043 | if ((sock->type == SOCK_RAW && | 
|---|
| 3044 | !dev_validate_header(dev, ll_header: skb->data, len)) || !skb->len) { | 
|---|
| 3045 | err = -EINVAL; | 
|---|
| 3046 | goto out_free; | 
|---|
| 3047 | } | 
|---|
| 3048 |  | 
|---|
| 3049 | skb_setup_tx_timestamp(skb, sockc: &sockc); | 
|---|
| 3050 |  | 
|---|
| 3051 | if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && | 
|---|
| 3052 | !packet_extra_vlan_len_allowed(dev, skb)) { | 
|---|
| 3053 | err = -EMSGSIZE; | 
|---|
| 3054 | goto out_free; | 
|---|
| 3055 | } | 
|---|
| 3056 |  | 
|---|
| 3057 | skb->protocol = proto; | 
|---|
| 3058 | skb->dev = dev; | 
|---|
| 3059 | skb->priority = sockc.priority; | 
|---|
| 3060 | skb->mark = sockc.mark; | 
|---|
| 3061 | skb_set_delivery_type_by_clockid(skb, kt: sockc.transmit_time, clockid: sk->sk_clockid); | 
|---|
| 3062 |  | 
|---|
| 3063 | if (unlikely(extra_len == 4)) | 
|---|
| 3064 | skb->no_fcs = 1; | 
|---|
| 3065 |  | 
|---|
| 3066 | packet_parse_headers(skb, sock); | 
|---|
| 3067 |  | 
|---|
| 3068 | if (vnet_hdr_sz) { | 
|---|
| 3069 | err = virtio_net_hdr_to_skb(skb, hdr: &vnet_hdr, vio_le()); | 
|---|
| 3070 | if (err) | 
|---|
| 3071 | goto out_free; | 
|---|
| 3072 | len += vnet_hdr_sz; | 
|---|
| 3073 | virtio_net_hdr_set_proto(skb, hdr: &vnet_hdr); | 
|---|
| 3074 | } | 
|---|
| 3075 |  | 
|---|
| 3076 | err = packet_xmit(po, skb); | 
|---|
| 3077 |  | 
|---|
| 3078 | if (unlikely(err != 0)) { | 
|---|
| 3079 | if (err > 0) | 
|---|
| 3080 | err = net_xmit_errno(err); | 
|---|
| 3081 | if (err) | 
|---|
| 3082 | goto out_unlock; | 
|---|
| 3083 | } | 
|---|
| 3084 |  | 
|---|
| 3085 | dev_put(dev); | 
|---|
| 3086 |  | 
|---|
| 3087 | return len; | 
|---|
| 3088 |  | 
|---|
| 3089 | out_free: | 
|---|
| 3090 | kfree_skb(skb); | 
|---|
| 3091 | out_unlock: | 
|---|
| 3092 | dev_put(dev); | 
|---|
| 3093 | out: | 
|---|
| 3094 | return err; | 
|---|
| 3095 | } | 
|---|
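/* Editorial sketch (not part of the original source): the SOCK_DGRAM path
 * of packet_snd() above builds the link-layer header itself from sll_addr
 * via dev_hard_header(). A minimal user-space sender, with `ifindex` and
 * `dst` supplied by the caller:
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>
#include <string.h>

static ssize_t dgram_send(int fd, int ifindex,
			  const unsigned char dst[ETH_ALEN],
			  const void *buf, size_t len)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family	 = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_IP);
	sll.sll_ifindex	 = ifindex;
	sll.sll_halen	 = ETH_ALEN;
	memcpy(sll.sll_addr, dst, ETH_ALEN);

	/* buf carries only the payload; the kernel prepends the header */
	return sendto(fd, buf, len, 0,
		      (struct sockaddr *)&sll, sizeof(sll));
}
#endif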
| 3096 |  | 
|---|
| 3097 | static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | 
|---|
| 3098 | { | 
|---|
| 3099 | struct sock *sk = sock->sk; | 
|---|
| 3100 | struct packet_sock *po = pkt_sk(sk); | 
|---|
| 3101 |  | 
|---|
| 3102 | /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy. | 
|---|
| 3103 | * tpacket_snd() will redo the check safely. | 
|---|
| 3104 | */ | 
|---|
| 3105 | if (data_race(po->tx_ring.pg_vec)) | 
|---|
| 3106 | return tpacket_snd(po, msg); | 
|---|
| 3107 |  | 
|---|
| 3108 | return packet_snd(sock, msg, len); | 
|---|
| 3109 | } | 
|---|
| 3110 |  | 
|---|
| 3111 | /* | 
|---|
| 3112 | *	Close a PACKET socket. This is fairly simple. We immediately go | 
|---|
| 3113 | *	to 'closed' state and remove our protocol entry in the device list. | 
|---|
| 3114 | */ | 
|---|
| 3115 |  | 
|---|
| 3116 | static int packet_release(struct socket *sock) | 
|---|
| 3117 | { | 
|---|
| 3118 | struct sock *sk = sock->sk; | 
|---|
| 3119 | struct packet_sock *po; | 
|---|
| 3120 | struct packet_fanout *f; | 
|---|
| 3121 | struct net *net; | 
|---|
| 3122 | union tpacket_req_u req_u; | 
|---|
| 3123 |  | 
|---|
| 3124 | if (!sk) | 
|---|
| 3125 | return 0; | 
|---|
| 3126 |  | 
|---|
| 3127 | net = sock_net(sk); | 
|---|
| 3128 | po = pkt_sk(sk); | 
|---|
| 3129 |  | 
|---|
| 3130 | mutex_lock(lock: &net->packet.sklist_lock); | 
|---|
| 3131 | sk_del_node_init_rcu(sk); | 
|---|
| 3132 | mutex_unlock(lock: &net->packet.sklist_lock); | 
|---|
| 3133 |  | 
|---|
| 3134 | sock_prot_inuse_add(net, prot: sk->sk_prot, val: -1); | 
|---|
| 3135 |  | 
|---|
| 3136 | spin_lock(lock: &po->bind_lock); | 
|---|
| 3137 | unregister_prot_hook(sk, sync: false); | 
|---|
| 3138 | packet_cached_dev_reset(po); | 
|---|
| 3139 |  | 
|---|
| 3140 | if (po->prot_hook.dev) { | 
|---|
| 3141 | netdev_put(dev: po->prot_hook.dev, tracker: &po->prot_hook.dev_tracker); | 
|---|
| 3142 | po->prot_hook.dev = NULL; | 
|---|
| 3143 | } | 
|---|
| 3144 | spin_unlock(lock: &po->bind_lock); | 
|---|
| 3145 |  | 
|---|
| 3146 | packet_flush_mclist(sk); | 
|---|
| 3147 |  | 
|---|
| 3148 | lock_sock(sk); | 
|---|
| 3149 | if (po->rx_ring.pg_vec) { | 
|---|
| 3150 | memset(s: &req_u, c: 0, n: sizeof(req_u)); | 
|---|
| 3151 | packet_set_ring(sk, req_u: &req_u, closing: 1, tx_ring: 0); | 
|---|
| 3152 | } | 
|---|
| 3153 |  | 
|---|
| 3154 | if (po->tx_ring.pg_vec) { | 
|---|
| 3155 | memset(s: &req_u, c: 0, n: sizeof(req_u)); | 
|---|
| 3156 | packet_set_ring(sk, req_u: &req_u, closing: 1, tx_ring: 1); | 
|---|
| 3157 | } | 
|---|
| 3158 | release_sock(sk); | 
|---|
| 3159 |  | 
|---|
| 3160 | f = fanout_release(sk); | 
|---|
| 3161 |  | 
|---|
| 3162 | synchronize_net(); | 
|---|
| 3163 |  | 
|---|
| 3164 | kfree(objp: po->rollover); | 
|---|
| 3165 | if (f) { | 
|---|
| 3166 | fanout_release_data(f); | 
|---|
| 3167 | kvfree(addr: f); | 
|---|
| 3168 | } | 
|---|
| 3169 | /* | 
|---|
| 3170 | *	Now the socket is dead. No more input will appear. | 
|---|
| 3171 | */ | 
|---|
| 3172 | sock_orphan(sk); | 
|---|
| 3173 | sock->sk = NULL; | 
|---|
| 3174 |  | 
|---|
| 3175 | /* Purge queues */ | 
|---|
| 3176 |  | 
|---|
| 3177 | skb_queue_purge(list: &sk->sk_receive_queue); | 
|---|
| 3178 | packet_free_pending(po); | 
|---|
| 3179 |  | 
|---|
| 3180 | sock_put(sk); | 
|---|
| 3181 | return 0; | 
|---|
| 3182 | } | 
|---|
| 3183 |  | 
|---|
| 3184 | /* | 
|---|
| 3185 | *	Attach a packet hook. | 
|---|
| 3186 | */ | 
|---|
| 3187 |  | 
|---|
| 3188 | static int packet_do_bind(struct sock *sk, const char *name, int ifindex, | 
|---|
| 3189 | __be16 proto) | 
|---|
| 3190 | { | 
|---|
| 3191 | struct packet_sock *po = pkt_sk(sk); | 
|---|
| 3192 | struct net_device *dev = NULL; | 
|---|
| 3193 | bool unlisted = false; | 
|---|
| 3194 | bool need_rehook; | 
|---|
| 3195 | int ret = 0; | 
|---|
| 3196 |  | 
|---|
| 3197 | lock_sock(sk); | 
|---|
| 3198 | spin_lock(lock: &po->bind_lock); | 
|---|
| 3199 | if (!proto) | 
|---|
| 3200 | proto = po->num; | 
|---|
| 3201 |  | 
|---|
| 3202 | rcu_read_lock(); | 
|---|
| 3203 |  | 
|---|
| 3204 | if (po->fanout) { | 
|---|
| 3205 | ret = -EINVAL; | 
|---|
| 3206 | goto out_unlock; | 
|---|
| 3207 | } | 
|---|
| 3208 |  | 
|---|
| 3209 | if (name) { | 
|---|
| 3210 | dev = dev_get_by_name_rcu(net: sock_net(sk), name); | 
|---|
| 3211 | if (!dev) { | 
|---|
| 3212 | ret = -ENODEV; | 
|---|
| 3213 | goto out_unlock; | 
|---|
| 3214 | } | 
|---|
| 3215 | } else if (ifindex) { | 
|---|
| 3216 | dev = dev_get_by_index_rcu(net: sock_net(sk), ifindex); | 
|---|
| 3217 | if (!dev) { | 
|---|
| 3218 | ret = -ENODEV; | 
|---|
| 3219 | goto out_unlock; | 
|---|
| 3220 | } | 
|---|
| 3221 | } | 
|---|
| 3222 |  | 
|---|
| 3223 | need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev; | 
|---|
| 3224 |  | 
|---|
| 3225 | if (need_rehook) { | 
|---|
| 3226 | dev_hold(dev); | 
|---|
| 3227 | if (packet_sock_flag(po, flag: PACKET_SOCK_RUNNING)) { | 
|---|
| 3228 | rcu_read_unlock(); | 
|---|
| 3229 | /* prevents packet_notifier() from calling | 
|---|
| 3230 | * register_prot_hook() | 
|---|
| 3231 | */ | 
|---|
| 3232 | WRITE_ONCE(po->num, 0); | 
|---|
| 3233 | __unregister_prot_hook(sk, sync: true); | 
|---|
| 3234 | rcu_read_lock(); | 
|---|
| 3235 | if (dev) | 
|---|
| 3236 | unlisted = !dev_get_by_index_rcu(net: sock_net(sk), | 
|---|
| 3237 | ifindex: dev->ifindex); | 
|---|
| 3238 | } | 
|---|
| 3239 |  | 
|---|
| 3240 | BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING)); | 
|---|
| 3241 | WRITE_ONCE(po->num, proto); | 
|---|
| 3242 | po->prot_hook.type = proto; | 
|---|
| 3243 |  | 
|---|
| 3244 | netdev_put(dev: po->prot_hook.dev, tracker: &po->prot_hook.dev_tracker); | 
|---|
| 3245 |  | 
|---|
| 3246 | if (unlikely(unlisted)) { | 
|---|
| 3247 | po->prot_hook.dev = NULL; | 
|---|
| 3248 | WRITE_ONCE(po->ifindex, -1); | 
|---|
| 3249 | packet_cached_dev_reset(po); | 
|---|
| 3250 | } else { | 
|---|
| 3251 | netdev_hold(dev, tracker: &po->prot_hook.dev_tracker, | 
|---|
| 3252 | GFP_ATOMIC); | 
|---|
| 3253 | po->prot_hook.dev = dev; | 
|---|
| 3254 | WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0); | 
|---|
| 3255 | packet_cached_dev_assign(po, dev); | 
|---|
| 3256 | } | 
|---|
| 3257 | dev_put(dev); | 
|---|
| 3258 | } | 
|---|
| 3259 |  | 
|---|
| 3260 | if (proto == 0 || !need_rehook) | 
|---|
| 3261 | goto out_unlock; | 
|---|
| 3262 |  | 
|---|
| 3263 | if (!unlisted && (!dev || (dev->flags & IFF_UP))) { | 
|---|
| 3264 | register_prot_hook(sk); | 
|---|
| 3265 | } else { | 
|---|
| 3266 | sk->sk_err = ENETDOWN; | 
|---|
| 3267 | if (!sock_flag(sk, flag: SOCK_DEAD)) | 
|---|
| 3268 | sk_error_report(sk); | 
|---|
| 3269 | } | 
|---|
| 3270 |  | 
|---|
| 3271 | out_unlock: | 
|---|
| 3272 | rcu_read_unlock(); | 
|---|
| 3273 | spin_unlock(lock: &po->bind_lock); | 
|---|
| 3274 | release_sock(sk); | 
|---|
| 3275 | return ret; | 
|---|
| 3276 | } | 
|---|
| 3277 |  | 
|---|
| 3278 | /* | 
|---|
| 3279 | *	Bind a packet socket to a device | 
|---|
| 3280 | */ | 
|---|
| 3281 |  | 
|---|
| 3282 | static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, | 
|---|
| 3283 | int addr_len) | 
|---|
| 3284 | { | 
|---|
| 3285 | struct sock *sk = sock->sk; | 
|---|
| 3286 | char name[sizeof(uaddr->sa_data_min) + 1]; | 
|---|
| 3287 |  | 
|---|
| 3288 | /* | 
|---|
| 3289 | *	Check legality | 
|---|
| 3290 | */ | 
|---|
| 3291 |  | 
|---|
| 3292 | if (addr_len != sizeof(struct sockaddr)) | 
|---|
| 3293 | return -EINVAL; | 
|---|
| 3294 | /* uaddr->sa_data comes from the userspace, it's not guaranteed to be | 
|---|
| 3295 | * zero-terminated. | 
|---|
| 3296 | */ | 
|---|
| 3297 | memcpy(to: name, from: uaddr->sa_data, len: sizeof(uaddr->sa_data_min)); | 
|---|
| 3298 | name[sizeof(uaddr->sa_data_min)] = 0; | 
|---|
| 3299 |  | 
|---|
| 3300 | return packet_do_bind(sk, name, ifindex: 0, proto: 0); | 
|---|
| 3301 | } | 
|---|
| 3302 |  | 
|---|
| 3303 | static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 
|---|
| 3304 | { | 
|---|
| 3305 | struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr; | 
|---|
| 3306 | struct sock *sk = sock->sk; | 
|---|
| 3307 |  | 
|---|
| 3308 | /* | 
|---|
| 3309 | *	Check legality | 
|---|
| 3310 | */ | 
|---|
| 3311 |  | 
|---|
| 3312 | if (addr_len < sizeof(struct sockaddr_ll)) | 
|---|
| 3313 | return -EINVAL; | 
|---|
| 3314 | if (sll->sll_family != AF_PACKET) | 
|---|
| 3315 | return -EINVAL; | 
|---|
| 3316 |  | 
|---|
| 3317 | return packet_do_bind(sk, NULL, ifindex: sll->sll_ifindex, proto: sll->sll_protocol); | 
|---|
| 3318 | } | 
|---|
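/* Editorial sketch (not part of the original source): packet_bind() above
 * expects a sockaddr_ll; binding attaches the socket's prot_hook to one
 * interface for the given protocol. A minimal caller:
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>
#include <string.h>

static int bind_to_ifindex(int fd, int ifindex)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family	 = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);	/* all protocols */
	sll.sll_ifindex	 = ifindex;

	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}
#endif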
| 3319 |  | 
|---|
| 3320 | static struct proto packet_proto = { | 
|---|
| 3321 | .name	  = "PACKET", | 
|---|
| 3322 | .owner	  = THIS_MODULE, | 
|---|
| 3323 | .obj_size = sizeof(struct packet_sock), | 
|---|
| 3324 | }; | 
|---|
| 3325 |  | 
|---|
| 3326 | /* | 
|---|
| 3327 | *	Create a packet of type SOCK_PACKET. | 
|---|
| 3328 | */ | 
|---|
| 3329 |  | 
|---|
| 3330 | static int packet_create(struct net *net, struct socket *sock, int protocol, | 
|---|
| 3331 | int kern) | 
|---|
| 3332 | { | 
|---|
| 3333 | struct sock *sk; | 
|---|
| 3334 | struct packet_sock *po; | 
|---|
| 3335 | __be16 proto = (__force __be16)protocol; /* weird, but documented */ | 
|---|
| 3336 | int err; | 
|---|
| 3337 |  | 
|---|
| 3338 | if (!ns_capable(ns: net->user_ns, CAP_NET_RAW)) | 
|---|
| 3339 | return -EPERM; | 
|---|
| 3340 | if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && | 
|---|
| 3341 | sock->type != SOCK_PACKET) | 
|---|
| 3342 | return -ESOCKTNOSUPPORT; | 
|---|
| 3343 |  | 
|---|
| 3344 | sock->state = SS_UNCONNECTED; | 
|---|
| 3345 |  | 
|---|
| 3346 | err = -ENOBUFS; | 
|---|
| 3347 | sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, prot: &packet_proto, kern); | 
|---|
| 3348 | if (sk == NULL) | 
|---|
| 3349 | goto out; | 
|---|
| 3350 |  | 
|---|
| 3351 | sock->ops = &packet_ops; | 
|---|
| 3352 | if (sock->type == SOCK_PACKET) | 
|---|
| 3353 | sock->ops = &packet_ops_spkt; | 
|---|
| 3354 |  | 
|---|
| 3355 | po = pkt_sk(sk); | 
|---|
| 3356 | err = packet_alloc_pending(po); | 
|---|
| 3357 | if (err) | 
|---|
| 3358 | goto out_sk_free; | 
|---|
| 3359 |  | 
|---|
| 3360 | sock_init_data(sock, sk); | 
|---|
| 3361 |  | 
|---|
| 3362 | init_completion(x: &po->skb_completion); | 
|---|
| 3363 | sk->sk_family = PF_PACKET; | 
|---|
| 3364 | po->num = proto; | 
|---|
| 3365 |  | 
|---|
| 3366 | packet_cached_dev_reset(po); | 
|---|
| 3367 |  | 
|---|
| 3368 | sk->sk_destruct = packet_sock_destruct; | 
|---|
| 3369 |  | 
|---|
| 3370 | /* | 
|---|
| 3371 | *	Attach a protocol block | 
|---|
| 3372 | */ | 
|---|
| 3373 |  | 
|---|
| 3374 | spin_lock_init(&po->bind_lock); | 
|---|
| 3375 | mutex_init(&po->pg_vec_lock); | 
|---|
| 3376 | po->rollover = NULL; | 
|---|
| 3377 | po->prot_hook.func = packet_rcv; | 
|---|
| 3378 |  | 
|---|
| 3379 | if (sock->type == SOCK_PACKET) | 
|---|
| 3380 | po->prot_hook.func = packet_rcv_spkt; | 
|---|
| 3381 |  | 
|---|
| 3382 | po->prot_hook.af_packet_priv = sk; | 
|---|
| 3383 | po->prot_hook.af_packet_net = sock_net(sk); | 
|---|
| 3384 |  | 
|---|
| 3385 | if (proto) { | 
|---|
| 3386 | po->prot_hook.type = proto; | 
|---|
| 3387 | __register_prot_hook(sk); | 
|---|
| 3388 | } | 
|---|
| 3389 |  | 
|---|
| 3390 | mutex_lock(lock: &net->packet.sklist_lock); | 
|---|
| 3391 | sk_add_node_tail_rcu(sk, list: &net->packet.sklist); | 
|---|
| 3392 | mutex_unlock(lock: &net->packet.sklist_lock); | 
|---|
| 3393 |  | 
|---|
| 3394 | sock_prot_inuse_add(net, prot: &packet_proto, val: 1); | 
|---|
| 3395 |  | 
|---|
| 3396 | return 0; | 
|---|
| 3397 | out_sk_free: | 
|---|
| 3398 | sk_free(sk); | 
|---|
| 3399 | out: | 
|---|
| 3400 | return err; | 
|---|
| 3401 | } | 
|---|
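/* Editorial sketch (not part of the original source): creating the socket
 * that packet_create() above services. CAP_NET_RAW in the socket's user
 * namespace is required, per the ns_capable() check; a nonzero protocol
 * registers the prot_hook immediately, protocol 0 leaves it unbound.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static int open_packet_socket(void)
{
	return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
}
#endif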
| 3402 |  | 
|---|
| 3403 | /* | 
|---|
| 3404 | *	Pull a packet from our receive queue and hand it to the user. | 
|---|
| 3405 | *	If necessary we block. | 
|---|
| 3406 | */ | 
|---|
| 3407 |  | 
|---|
| 3408 | static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | 
|---|
| 3409 | int flags) | 
|---|
| 3410 | { | 
|---|
| 3411 | struct sock *sk = sock->sk; | 
|---|
| 3412 | struct sk_buff *skb; | 
|---|
| 3413 | int copied, err; | 
|---|
| 3414 | int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz); | 
|---|
| 3415 | unsigned int origlen = 0; | 
|---|
| 3416 |  | 
|---|
| 3417 | err = -EINVAL; | 
|---|
| 3418 | if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) | 
|---|
| 3419 | goto out; | 
|---|
| 3420 |  | 
|---|
| 3421 | #if 0 | 
|---|
| 3422 | /* What error should we return now? EUNATTACH? */ | 
|---|
| 3423 | if (pkt_sk(sk)->ifindex < 0) | 
|---|
| 3424 | return -ENODEV; | 
|---|
| 3425 | #endif | 
|---|
| 3426 |  | 
|---|
| 3427 | if (flags & MSG_ERRQUEUE) { | 
|---|
| 3428 | err = sock_recv_errqueue(sk, msg, len, | 
|---|
| 3429 | SOL_PACKET, PACKET_TX_TIMESTAMP); | 
|---|
| 3430 | goto out; | 
|---|
| 3431 | } | 
|---|
| 3432 |  | 
|---|
| 3433 | /* | 
|---|
| 3434 | *	Call the generic datagram receiver. This handles all sorts | 
|---|
| 3435 | *	of horrible races and re-entrancy so we can forget about it | 
|---|
| 3436 | *	in the protocol layers. | 
|---|
| 3437 | * | 
|---|
| 3438 | *	Now it will return ENETDOWN if the device has just gone down, | 
|---|
| 3439 | *	but then it will block. | 
|---|
| 3440 | */ | 
|---|
| 3441 |  | 
|---|
| 3442 | skb = skb_recv_datagram(sk, flags, err: &err); | 
|---|
| 3443 |  | 
|---|
| 3444 | /* | 
|---|
| 3445 | *	An error occurred, so return it. Because skb_recv_datagram() | 
|---|
| 3446 | *	handles the blocking, we don't need to see or worry about | 
|---|
| 3447 | *	blocking retries. | 
|---|
| 3448 | */ | 
|---|
| 3449 |  | 
|---|
| 3450 | if (skb == NULL) | 
|---|
| 3451 | goto out; | 
|---|
| 3452 |  | 
|---|
| 3453 | packet_rcv_try_clear_pressure(pkt_sk(sk)); | 
|---|
| 3454 |  | 
|---|
| 3455 | if (vnet_hdr_len) { | 
|---|
| 3456 | err = packet_rcv_vnet(msg, skb, len: &len, vnet_hdr_sz: vnet_hdr_len); | 
|---|
| 3457 | if (err) | 
|---|
| 3458 | goto out_free; | 
|---|
| 3459 | } | 
|---|
| 3460 |  | 
|---|
| 3461 | /* You lose any data beyond the buffer you gave. If that worries | 
|---|
| 3462 | * a user program, it can ask the device for its MTU | 
|---|
| 3463 | * anyway. | 
|---|
| 3464 | */ | 
|---|
| 3465 | copied = skb->len; | 
|---|
| 3466 | if (copied > len) { | 
|---|
| 3467 | copied = len; | 
|---|
| 3468 | msg->msg_flags |= MSG_TRUNC; | 
|---|
| 3469 | } | 
|---|
| 3470 |  | 
|---|
| 3471 | err = skb_copy_datagram_msg(from: skb, offset: 0, msg, size: copied); | 
|---|
| 3472 | if (err) | 
|---|
| 3473 | goto out_free; | 
|---|
| 3474 |  | 
|---|
| 3475 | if (sock->type != SOCK_PACKET) { | 
|---|
| 3476 | struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; | 
|---|
| 3477 |  | 
|---|
| 3478 | /* Original length was stored in sockaddr_ll fields */ | 
|---|
| 3479 | origlen = PACKET_SKB_CB(skb)->sa.origlen; | 
|---|
| 3480 | sll->sll_family = AF_PACKET; | 
|---|
| 3481 | sll->sll_protocol = (sock->type == SOCK_DGRAM) ? | 
|---|
| 3482 | vlan_get_protocol_dgram(skb) : skb->protocol; | 
|---|
| 3483 | } | 
|---|
| 3484 |  | 
|---|
| 3485 | sock_recv_cmsgs(msg, sk, skb); | 
|---|
| 3486 |  | 
|---|
| 3487 | if (msg->msg_name) { | 
|---|
| 3488 | const size_t max_len = min(sizeof(skb->cb), | 
|---|
| 3489 | sizeof(struct sockaddr_storage)); | 
|---|
| 3490 | int copy_len; | 
|---|
| 3491 |  | 
|---|
| 3492 | /* If the address length field is there to be filled | 
|---|
| 3493 | * in, we fill it in now. | 
|---|
| 3494 | */ | 
|---|
| 3495 | if (sock->type == SOCK_PACKET) { | 
|---|
| 3496 | __sockaddr_check_size(sizeof(struct sockaddr_pkt)); | 
|---|
| 3497 | msg->msg_namelen = sizeof(struct sockaddr_pkt); | 
|---|
| 3498 | copy_len = msg->msg_namelen; | 
|---|
| 3499 | } else { | 
|---|
| 3500 | struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; | 
|---|
| 3501 |  | 
|---|
| 3502 | msg->msg_namelen = sll->sll_halen + | 
|---|
| 3503 | offsetof(struct sockaddr_ll, sll_addr); | 
|---|
| 3504 | copy_len = msg->msg_namelen; | 
|---|
| 3505 | if (msg->msg_namelen < sizeof(struct sockaddr_ll)) { | 
|---|
| 3506 | memset(s: msg->msg_name + | 
|---|
| 3507 | offsetof(struct sockaddr_ll, sll_addr), | 
|---|
| 3508 | c: 0, n: sizeof(sll->sll_addr)); | 
|---|
| 3509 | msg->msg_namelen = sizeof(struct sockaddr_ll); | 
|---|
| 3510 | } | 
|---|
| 3511 | } | 
|---|
| 3512 | if (WARN_ON_ONCE(copy_len > max_len)) { | 
|---|
| 3513 | copy_len = max_len; | 
|---|
| 3514 | msg->msg_namelen = copy_len; | 
|---|
| 3515 | } | 
|---|
| 3516 | memcpy(to: msg->msg_name, from: &PACKET_SKB_CB(skb)->sa, len: copy_len); | 
|---|
| 3517 | } | 
|---|
| 3518 |  | 
|---|
| 3519 | if (packet_sock_flag(pkt_sk(sk), flag: PACKET_SOCK_AUXDATA)) { | 
|---|
| 3520 | struct tpacket_auxdata aux; | 
|---|
| 3521 |  | 
|---|
| 3522 | aux.tp_status = TP_STATUS_USER; | 
|---|
| 3523 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 
|---|
| 3524 | aux.tp_status |= TP_STATUS_CSUMNOTREADY; | 
|---|
| 3525 | else if (skb->pkt_type != PACKET_OUTGOING && | 
|---|
| 3526 | skb_csum_unnecessary(skb)) | 
|---|
| 3527 | aux.tp_status |= TP_STATUS_CSUM_VALID; | 
|---|
| 3528 | if (skb_is_gso(skb) && skb_is_gso_tcp(skb)) | 
|---|
| 3529 | aux.tp_status |= TP_STATUS_GSO_TCP; | 
|---|
| 3530 |  | 
|---|
| 3531 | aux.tp_len = origlen; | 
|---|
| 3532 | aux.tp_snaplen = skb->len; | 
|---|
| 3533 | aux.tp_mac = 0; | 
|---|
| 3534 | aux.tp_net = skb_network_offset(skb); | 
|---|
| 3535 | if (skb_vlan_tag_present(skb)) { | 
|---|
| 3536 | aux.tp_vlan_tci = skb_vlan_tag_get(skb); | 
|---|
| 3537 | aux.tp_vlan_tpid = ntohs(skb->vlan_proto); | 
|---|
| 3538 | aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; | 
|---|
| 3539 | } else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) { | 
|---|
| 3540 | struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; | 
|---|
| 3541 | struct net_device *dev; | 
|---|
| 3542 |  | 
|---|
| 3543 | rcu_read_lock(); | 
|---|
| 3544 | dev = dev_get_by_index_rcu(net: sock_net(sk), ifindex: sll->sll_ifindex); | 
|---|
| 3545 | if (dev) { | 
|---|
| 3546 | aux.tp_vlan_tci = vlan_get_tci(skb, dev); | 
|---|
| 3547 | aux.tp_vlan_tpid = ntohs(skb->protocol); | 
|---|
| 3548 | aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; | 
|---|
| 3549 | } else { | 
|---|
| 3550 | aux.tp_vlan_tci = 0; | 
|---|
| 3551 | aux.tp_vlan_tpid = 0; | 
|---|
| 3552 | } | 
|---|
| 3553 | rcu_read_unlock(); | 
|---|
| 3554 | } else { | 
|---|
| 3555 | aux.tp_vlan_tci = 0; | 
|---|
| 3556 | aux.tp_vlan_tpid = 0; | 
|---|
| 3557 | } | 
|---|
| 3558 | put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, len: sizeof(aux), data: &aux); | 
|---|
| 3559 | } | 
|---|
| 3560 |  | 
|---|
| 3561 | /* | 
|---|
| 3562 | *	Free or return the buffer as appropriate. Again this | 
|---|
| 3563 | *	hides all the races and re-entrancy issues from us. | 
|---|
| 3564 | */ | 
|---|
| 3565 | err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied); | 
|---|
| 3566 |  | 
|---|
| 3567 | out_free: | 
|---|
| 3568 | skb_free_datagram(sk, skb); | 
|---|
| 3569 | out: | 
|---|
| 3570 | return err; | 
|---|
| 3571 | } | 
|---|
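/* Editorial sketch (not part of the original source): reading the
 * tpacket_auxdata control message that packet_recvmsg() above emits once
 * PACKET_AUXDATA is enabled. A minimal receiver:
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static ssize_t recv_with_aux(int fd, void *buf, size_t len,
			     struct tpacket_auxdata *aux_out)
{
	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	if (n < 0)
		return n;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA)
			memcpy(aux_out, CMSG_DATA(cmsg), sizeof(*aux_out));
	return n;
}
#endif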
| 3572 |  | 
|---|
| 3573 | static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, | 
|---|
| 3574 | int peer) | 
|---|
| 3575 | { | 
|---|
| 3576 | struct net_device *dev; | 
|---|
| 3577 | struct sock *sk	= sock->sk; | 
|---|
| 3578 |  | 
|---|
| 3579 | if (peer) | 
|---|
| 3580 | return -EOPNOTSUPP; | 
|---|
| 3581 |  | 
|---|
| 3582 | uaddr->sa_family = AF_PACKET; | 
|---|
| 3583 | memset(s: uaddr->sa_data, c: 0, n: sizeof(uaddr->sa_data_min)); | 
|---|
| 3584 | rcu_read_lock(); | 
|---|
| 3585 | dev = dev_get_by_index_rcu(net: sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex)); | 
|---|
| 3586 | if (dev) | 
|---|
| 3587 | strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min)); | 
|---|
| 3588 | rcu_read_unlock(); | 
|---|
| 3589 |  | 
|---|
| 3590 | return sizeof(*uaddr); | 
|---|
| 3591 | } | 
|---|
| 3592 |  | 
|---|
| 3593 | static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | 
|---|
| 3594 | int peer) | 
|---|
| 3595 | { | 
|---|
| 3596 | struct net_device *dev; | 
|---|
| 3597 | struct sock *sk = sock->sk; | 
|---|
| 3598 | struct packet_sock *po = pkt_sk(sk); | 
|---|
| 3599 | DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); | 
|---|
| 3600 | int ifindex; | 
|---|
| 3601 |  | 
|---|
| 3602 | if (peer) | 
|---|
| 3603 | return -EOPNOTSUPP; | 
|---|
| 3604 |  | 
|---|
| 3605 | ifindex = READ_ONCE(po->ifindex); | 
|---|
| 3606 | sll->sll_family = AF_PACKET; | 
|---|
| 3607 | sll->sll_ifindex = ifindex; | 
|---|
| 3608 | sll->sll_protocol = READ_ONCE(po->num); | 
|---|
| 3609 | sll->sll_pkttype = 0; | 
|---|
| 3610 | rcu_read_lock(); | 
|---|
| 3611 | dev = dev_get_by_index_rcu(net: sock_net(sk), ifindex); | 
|---|
| 3612 | if (dev) { | 
|---|
| 3613 | sll->sll_hatype = dev->type; | 
|---|
| 3614 | sll->sll_halen = dev->addr_len; | 
|---|
| 3615 |  | 
|---|
| 3616 | /* Let __fortify_memcpy_chk() know the actual buffer size. */ | 
|---|
| 3617 | memcpy(to: ((struct sockaddr_storage *)sll)->__data + | 
|---|
| 3618 | offsetof(struct sockaddr_ll, sll_addr) - | 
|---|
| 3619 | offsetofend(struct sockaddr_ll, sll_family), | 
|---|
| 3620 | from: dev->dev_addr, len: dev->addr_len); | 
|---|
| 3621 | } else { | 
|---|
| 3622 | sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */ | 
|---|
| 3623 | sll->sll_halen = 0; | 
|---|
| 3624 | } | 
|---|
| 3625 | rcu_read_unlock(); | 
|---|
| 3626 |  | 
|---|
| 3627 | return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen; | 
|---|
| 3628 | } | 
|---|
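/* Editorial sketch (not part of the original source): recovering the bound
 * ifindex, hardware type and address via packet_getname() above; the
 * returned length is offsetof(struct sockaddr_ll, sll_addr) + sll_halen.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>

static int query_binding(int fd, struct sockaddr_ll *sll)
{
	socklen_t len = sizeof(*sll);

	return getsockname(fd, (struct sockaddr *)sll, &len);
}
#endif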
| 3629 |  | 
|---|
| 3630 | static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | 
|---|
| 3631 | int what) | 
|---|
| 3632 | { | 
|---|
| 3633 | switch (i->type) { | 
|---|
| 3634 | case PACKET_MR_MULTICAST: | 
|---|
| 3635 | if (i->alen != dev->addr_len) | 
|---|
| 3636 | return -EINVAL; | 
|---|
| 3637 | if (what > 0) | 
|---|
| 3638 | return dev_mc_add(dev, addr: i->addr); | 
|---|
| 3639 | else | 
|---|
| 3640 | return dev_mc_del(dev, addr: i->addr); | 
|---|
| 3641 | break; | 
|---|
| 3642 | case PACKET_MR_PROMISC: | 
|---|
| 3643 | return dev_set_promiscuity(dev, inc: what); | 
|---|
| 3644 | case PACKET_MR_ALLMULTI: | 
|---|
| 3645 | return dev_set_allmulti(dev, inc: what); | 
|---|
| 3646 | case PACKET_MR_UNICAST: | 
|---|
| 3647 | if (i->alen != dev->addr_len) | 
|---|
| 3648 | return -EINVAL; | 
|---|
| 3649 | if (what > 0) | 
|---|
| 3650 | return dev_uc_add(dev, addr: i->addr); | 
|---|
| 3651 | else | 
|---|
| 3652 | return dev_uc_del(dev, addr: i->addr); | 
|---|
| 3653 | break; | 
|---|
| 3654 | default: | 
|---|
| 3655 | break; | 
|---|
| 3656 | } | 
|---|
| 3657 | return 0; | 
|---|
| 3658 | } | 
|---|
| 3659 |  | 
|---|
| 3660 | static void packet_dev_mclist_delete(struct net_device *dev, | 
|---|
| 3661 | struct packet_mclist **mlp, | 
|---|
| 3662 | struct list_head *list) | 
|---|
| 3663 | { | 
|---|
| 3664 | struct packet_mclist *ml; | 
|---|
| 3665 |  | 
|---|
| 3666 | while ((ml = *mlp) != NULL) { | 
|---|
| 3667 | if (ml->ifindex == dev->ifindex) { | 
|---|
| 3668 | list_add(new: &ml->remove_list, head: list); | 
|---|
| 3669 | *mlp = ml->next; | 
|---|
| 3670 | } else | 
|---|
| 3671 | mlp = &ml->next; | 
|---|
| 3672 | } | 
|---|
| 3673 | } | 
|---|
| 3674 |  | 
|---|
| 3675 | static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) | 
|---|
| 3676 | { | 
|---|
| 3677 | struct packet_sock *po = pkt_sk(sk); | 
|---|
| 3678 | struct packet_mclist *ml, *i; | 
|---|
| 3679 | struct net_device *dev; | 
|---|
| 3680 | int err; | 
|---|
| 3681 |  | 
|---|
| 3682 | rtnl_lock(); | 
|---|
| 3683 |  | 
|---|
| 3684 | err = -ENODEV; | 
|---|
| 3685 | dev = __dev_get_by_index(net: sock_net(sk), ifindex: mreq->mr_ifindex); | 
|---|
| 3686 | if (!dev) | 
|---|
| 3687 | goto done; | 
|---|
| 3688 |  | 
|---|
| 3689 | err = -EINVAL; | 
|---|
| 3690 | if (mreq->mr_alen > dev->addr_len) | 
|---|
| 3691 | goto done; | 
|---|
| 3692 |  | 
|---|
| 3693 | err = -ENOBUFS; | 
|---|
| 3694 | i = kmalloc(sizeof(*i), GFP_KERNEL); | 
|---|
| 3695 | if (i == NULL) | 
|---|
| 3696 | goto done; | 
|---|
| 3697 |  | 
|---|
| 3698 | err = 0; | 
|---|
| 3699 | for (ml = po->mclist; ml; ml = ml->next) { | 
|---|
| 3700 | if (ml->ifindex == mreq->mr_ifindex && | 
|---|
| 3701 | ml->type == mreq->mr_type && | 
|---|
| 3702 | ml->alen == mreq->mr_alen && | 
|---|
| 3703 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { | 
|---|
| 3704 | ml->count++; | 
|---|
| 3705 | /* Free the new element ... */ | 
|---|
| 3706 | kfree(objp: i); | 
|---|
| 3707 | goto done; | 
|---|
| 3708 | } | 
|---|
| 3709 | } | 
|---|
| 3710 |  | 
|---|
| 3711 | i->type = mreq->mr_type; | 
|---|
| 3712 | i->ifindex = mreq->mr_ifindex; | 
|---|
| 3713 | i->alen = mreq->mr_alen; | 
|---|
| 3714 | memcpy(to: i->addr, from: mreq->mr_address, len: i->alen); | 
|---|
| 3715 | memset(s: i->addr + i->alen, c: 0, n: sizeof(i->addr) - i->alen); | 
|---|
| 3716 | i->count = 1; | 
|---|
| 3717 | INIT_LIST_HEAD(list: &i->remove_list); | 
|---|
| 3718 | i->next = po->mclist; | 
|---|
| 3719 | po->mclist = i; | 
|---|
| 3720 | err = packet_dev_mc(dev, i, what: 1); | 
|---|
| 3721 | if (err) { | 
|---|
| 3722 | po->mclist = i->next; | 
|---|
| 3723 | kfree(objp: i); | 
|---|
| 3724 | } | 
|---|
| 3725 |  | 
|---|
| 3726 | done: | 
|---|
| 3727 | rtnl_unlock(); | 
|---|
| 3728 | return err; | 
|---|
| 3729 | } | 
|---|
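/* Editorial sketch (not part of the original source): packet_mc_add()
 * above implements PACKET_ADD_MEMBERSHIP. Unlike toggling IFF_PROMISC via
 * SIOCSIFFLAGS, this reference-counted form is undone automatically when
 * the socket closes (see packet_flush_mclist() below):
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static int join_promisc(int fd, int ifindex)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = ifindex;
	mreq.mr_type	= PACKET_MR_PROMISC;

	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
#endif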
| 3730 |  | 
|---|
| 3731 | static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) | 
|---|
| 3732 | { | 
|---|
| 3733 | struct packet_mclist *ml, **mlp; | 
|---|
| 3734 |  | 
|---|
| 3735 | rtnl_lock(); | 
|---|
| 3736 |  | 
|---|
| 3737 | for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) { | 
|---|
| 3738 | if (ml->ifindex == mreq->mr_ifindex && | 
|---|
| 3739 | ml->type == mreq->mr_type && | 
|---|
| 3740 | ml->alen == mreq->mr_alen && | 
|---|
| 3741 | memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) { | 
|---|
| 3742 | if (--ml->count == 0) { | 
|---|
| 3743 | struct net_device *dev; | 
|---|
| 3744 | *mlp = ml->next; | 
|---|
| 3745 | dev = __dev_get_by_index(net: sock_net(sk), ifindex: ml->ifindex); | 
|---|
| 3746 | if (dev) | 
|---|
| 3747 | packet_dev_mc(dev, i: ml, what: -1); | 
|---|
| 3748 | kfree(objp: ml); | 
|---|
| 3749 | } | 
|---|
| 3750 | break; | 
|---|
| 3751 | } | 
|---|
| 3752 | } | 
|---|
| 3753 | rtnl_unlock(); | 
|---|
| 3754 | return 0; | 
|---|
| 3755 | } | 
|---|
| 3756 |  | 
|---|
| 3757 | static void packet_flush_mclist(struct sock *sk) | 
|---|
| 3758 | { | 
|---|
| 3759 | struct packet_sock *po = pkt_sk(sk); | 
|---|
| 3760 | struct packet_mclist *ml; | 
|---|
| 3761 |  | 
|---|
| 3762 | if (!po->mclist) | 
|---|
| 3763 | return; | 
|---|
| 3764 |  | 
|---|
| 3765 | rtnl_lock(); | 
|---|
| 3766 | while ((ml = po->mclist) != NULL) { | 
|---|
| 3767 | struct net_device *dev; | 
|---|
| 3768 |  | 
|---|
| 3769 | po->mclist = ml->next; | 
|---|
| 3770 | dev = __dev_get_by_index(net: sock_net(sk), ifindex: ml->ifindex); | 
|---|
| 3771 | if (dev != NULL) | 
|---|
| 3772 | packet_dev_mc(dev, i: ml, what: -1); | 
|---|
| 3773 | kfree(objp: ml); | 
|---|
| 3774 | } | 
|---|
| 3775 | rtnl_unlock(); | 
|---|
| 3776 | } | 
|---|
| 3777 |  | 
|---|
| 3778 | static int | 
|---|
| 3779 | packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, | 
|---|
| 3780 | unsigned int optlen) | 
|---|
| 3781 | { | 
|---|
| 3782 | struct sock *sk = sock->sk; | 
|---|
| 3783 | struct packet_sock *po = pkt_sk(sk); | 
|---|
| 3784 | int ret; | 
|---|
| 3785 |  | 
|---|
| 3786 | if (level != SOL_PACKET) | 
|---|
| 3787 | return -ENOPROTOOPT; | 
|---|
| 3788 |  | 
|---|
| 3789 | switch (optname) { | 
|---|
| 3790 | case PACKET_ADD_MEMBERSHIP: | 
|---|
| 3791 | case PACKET_DROP_MEMBERSHIP: | 
|---|
| 3792 | { | 
|---|
| 3793 | struct packet_mreq_max mreq; | 
|---|
| 3794 | int len = optlen; | 
|---|
| 3795 | memset(s: &mreq, c: 0, n: sizeof(mreq)); | 
|---|
| 3796 | if (len < sizeof(struct packet_mreq)) | 
|---|
| 3797 | return -EINVAL; | 
|---|
| 3798 | if (len > sizeof(mreq)) | 
|---|
| 3799 | len = sizeof(mreq); | 
|---|
| 3800 | if (copy_from_sockptr(dst: &mreq, src: optval, size: len)) | 
|---|
| 3801 | return -EFAULT; | 
|---|
| 3802 | if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) | 
|---|
| 3803 | return -EINVAL; | 
|---|
| 3804 | if (optname == PACKET_ADD_MEMBERSHIP) | 
|---|
| 3805 | ret = packet_mc_add(sk, mreq: &mreq); | 
|---|
| 3806 | else | 
|---|
| 3807 | ret = packet_mc_drop(sk, mreq: &mreq); | 
|---|
| 3808 | return ret; | 
|---|
| 3809 | } | 
|---|
| 3810 |  | 
|---|
| 3811 | case PACKET_RX_RING: | 
|---|
| 3812 | case PACKET_TX_RING: | 
|---|
| 3813 | { | 
|---|
| 3814 | union tpacket_req_u req_u; | 
|---|
| 3815 |  | 
|---|
| 3816 | ret = -EINVAL; | 
|---|
| 3817 | lock_sock(sk); | 
|---|
| 3818 | switch (po->tp_version) { | 
|---|
| 3819 | case TPACKET_V1: | 
|---|
| 3820 | case TPACKET_V2: | 
|---|
| 3821 | if (optlen < sizeof(req_u.req)) | 
|---|
| 3822 | break; | 
|---|
| 3823 | ret = copy_from_sockptr(dst: &req_u.req, src: optval, | 
|---|
| 3824 | size: sizeof(req_u.req)) ? | 
|---|
| 3825 | -EINVAL : 0; | 
|---|
| 3826 | break; | 
|---|
| 3827 | case TPACKET_V3: | 
|---|
| 3828 | default: | 
|---|
| 3829 | if (optlen < sizeof(req_u.req3)) | 
|---|
| 3830 | break; | 
|---|
| 3831 | ret = copy_from_sockptr(dst: &req_u.req3, src: optval, | 
|---|
| 3832 | size: sizeof(req_u.req3)) ? | 
|---|
| 3833 | -EINVAL : 0; | 
|---|
| 3834 | break; | 
|---|
| 3835 | } | 
|---|
| 3836 | if (!ret) | 
|---|
| 3837 | ret = packet_set_ring(sk, req_u: &req_u, closing: 0, | 
|---|
| 3838 | tx_ring: optname == PACKET_TX_RING); | 
|---|
| 3839 | release_sock(sk); | 
|---|
| 3840 | return ret; | 
|---|
| 3841 | } | 
|---|
| 3842 | case PACKET_COPY_THRESH: | 
|---|
| 3843 | { | 
|---|
| 3844 | int val; | 
|---|
| 3845 |  | 
|---|
| 3846 | if (optlen != sizeof(val)) | 
|---|
| 3847 | return -EINVAL; | 
|---|
| 3848 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 3849 | return -EFAULT; | 
|---|
| 3850 |  | 
|---|
| 3851 | WRITE_ONCE(pkt_sk(sk)->copy_thresh, val); | 
|---|
| 3852 | return 0; | 
|---|
| 3853 | } | 
|---|
| 3854 | case PACKET_VERSION: | 
|---|
| 3855 | { | 
|---|
| 3856 | int val; | 
|---|
| 3857 |  | 
|---|
| 3858 | if (optlen != sizeof(val)) | 
|---|
| 3859 | return -EINVAL; | 
|---|
| 3860 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 3861 | return -EFAULT; | 
|---|
| 3862 | switch (val) { | 
|---|
| 3863 | case TPACKET_V1: | 
|---|
| 3864 | case TPACKET_V2: | 
|---|
| 3865 | case TPACKET_V3: | 
|---|
| 3866 | break; | 
|---|
| 3867 | default: | 
|---|
| 3868 | return -EINVAL; | 
|---|
| 3869 | } | 
|---|
| 3870 | lock_sock(sk); | 
|---|
| 3871 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { | 
|---|
| 3872 | ret = -EBUSY; | 
|---|
| 3873 | } else { | 
|---|
| 3874 | po->tp_version = val; | 
|---|
| 3875 | ret = 0; | 
|---|
| 3876 | } | 
|---|
| 3877 | release_sock(sk); | 
|---|
| 3878 | return ret; | 
|---|
| 3879 | } | 
|---|
| 3880 | case PACKET_RESERVE: | 
|---|
| 3881 | { | 
|---|
| 3882 | unsigned int val; | 
|---|
| 3883 |  | 
|---|
| 3884 | if (optlen != sizeof(val)) | 
|---|
| 3885 | return -EINVAL; | 
|---|
| 3886 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 3887 | return -EFAULT; | 
|---|
| 3888 | if (val > INT_MAX) | 
|---|
| 3889 | return -EINVAL; | 
|---|
| 3890 | lock_sock(sk); | 
|---|
| 3891 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { | 
|---|
| 3892 | ret = -EBUSY; | 
|---|
| 3893 | } else { | 
|---|
| 3894 | po->tp_reserve = val; | 
|---|
| 3895 | ret = 0; | 
|---|
| 3896 | } | 
|---|
| 3897 | release_sock(sk); | 
|---|
| 3898 | return ret; | 
|---|
| 3899 | } | 
|---|
| 3900 | case PACKET_LOSS: | 
|---|
| 3901 | { | 
|---|
| 3902 | unsigned int val; | 
|---|
| 3903 |  | 
|---|
| 3904 | if (optlen != sizeof(val)) | 
|---|
| 3905 | return -EINVAL; | 
|---|
| 3906 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 3907 | return -EFAULT; | 
|---|
| 3908 |  | 
|---|
| 3909 | lock_sock(sk); | 
|---|
| 3910 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { | 
|---|
| 3911 | ret = -EBUSY; | 
|---|
| 3912 | } else { | 
|---|
| 3913 | packet_sock_flag_set(po, flag: PACKET_SOCK_TP_LOSS, val); | 
|---|
| 3914 | ret = 0; | 
|---|
| 3915 | } | 
|---|
| 3916 | release_sock(sk); | 
|---|
| 3917 | return ret; | 
|---|
| 3918 | } | 
|---|
| 3919 | case PACKET_AUXDATA: | 
|---|
| 3920 | { | 
|---|
| 3921 | int val; | 
|---|
| 3922 |  | 
|---|
| 3923 | if (optlen < sizeof(val)) | 
|---|
| 3924 | return -EINVAL; | 
|---|
| 3925 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 3926 | return -EFAULT; | 
|---|
| 3927 |  | 
|---|
| 3928 | packet_sock_flag_set(po, flag: PACKET_SOCK_AUXDATA, val); | 
|---|
| 3929 | return 0; | 
|---|
| 3930 | } | 
|---|
| 3931 | case PACKET_ORIGDEV: | 
|---|
| 3932 | { | 
|---|
| 3933 | int val; | 
|---|
| 3934 |  | 
|---|
| 3935 | if (optlen < sizeof(val)) | 
|---|
| 3936 | return -EINVAL; | 
|---|
| 3937 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 3938 | return -EFAULT; | 
|---|
| 3939 |  | 
|---|
| 3940 | packet_sock_flag_set(po, flag: PACKET_SOCK_ORIGDEV, val); | 
|---|
| 3941 | return 0; | 
|---|
| 3942 | } | 
|---|
| 3943 | case PACKET_VNET_HDR: | 
|---|
| 3944 | case PACKET_VNET_HDR_SZ: | 
|---|
| 3945 | { | 
|---|
| 3946 | int val, hdr_len; | 
|---|
| 3947 |  | 
|---|
| 3948 | if (sock->type != SOCK_RAW) | 
|---|
| 3949 | return -EINVAL; | 
|---|
| 3950 | if (optlen < sizeof(val)) | 
|---|
| 3951 | return -EINVAL; | 
|---|
| 3952 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 3953 | return -EFAULT; | 
|---|
| 3954 |  | 
|---|
| 3955 | if (optname == PACKET_VNET_HDR_SZ) { | 
|---|
| 3956 | if (val && val != sizeof(struct virtio_net_hdr) && | 
|---|
| 3957 | val != sizeof(struct virtio_net_hdr_mrg_rxbuf)) | 
|---|
| 3958 | return -EINVAL; | 
|---|
| 3959 | hdr_len = val; | 
|---|
| 3960 | } else { | 
|---|
| 3961 | hdr_len = val ? sizeof(struct virtio_net_hdr) : 0; | 
|---|
| 3962 | } | 
|---|
| 3963 | lock_sock(sk); | 
|---|
| 3964 | if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { | 
|---|
| 3965 | ret = -EBUSY; | 
|---|
| 3966 | } else { | 
|---|
| 3967 | WRITE_ONCE(po->vnet_hdr_sz, hdr_len); | 
|---|
| 3968 | ret = 0; | 
|---|
| 3969 | } | 
|---|
| 3970 | release_sock(sk); | 
|---|
| 3971 | return ret; | 
|---|
| 3972 | } | 
|---|
| 3973 | case PACKET_TIMESTAMP: | 
|---|
| 3974 | { | 
|---|
| 3975 | int val; | 
|---|
| 3976 |  | 
|---|
| 3977 | if (optlen != sizeof(val)) | 
|---|
| 3978 | return -EINVAL; | 
|---|
| 3979 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 3980 | return -EFAULT; | 
|---|
| 3981 |  | 
|---|
| 3982 | WRITE_ONCE(po->tp_tstamp, val); | 
|---|
| 3983 | return 0; | 
|---|
| 3984 | } | 
|---|
| 3985 | case PACKET_FANOUT: | 
|---|
| 3986 | { | 
|---|
| 3987 | struct fanout_args args = { 0 }; | 
|---|
| 3988 |  | 
|---|
| 3989 | if (optlen != sizeof(int) && optlen != sizeof(args)) | 
|---|
| 3990 | return -EINVAL; | 
|---|
| 3991 | if (copy_from_sockptr(dst: &args, src: optval, size: optlen)) | 
|---|
| 3992 | return -EFAULT; | 
|---|
| 3993 |  | 
|---|
| 3994 | return fanout_add(sk, args: &args); | 
|---|
| 3995 | } | 
|---|
| 3996 | case PACKET_FANOUT_DATA: | 
|---|
| 3997 | { | 
|---|
| 3998 | /* Paired with the WRITE_ONCE() in fanout_add() */ | 
|---|
| 3999 | if (!READ_ONCE(po->fanout)) | 
|---|
| 4000 | return -EINVAL; | 
|---|
| 4001 |  | 
|---|
| 4002 | return fanout_set_data(po, data: optval, len: optlen); | 
|---|
| 4003 | } | 
|---|
| 4004 | case PACKET_IGNORE_OUTGOING: | 
|---|
| 4005 | { | 
|---|
| 4006 | int val; | 
|---|
| 4007 |  | 
|---|
| 4008 | if (optlen != sizeof(val)) | 
|---|
| 4009 | return -EINVAL; | 
|---|
| 4010 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 4011 | return -EFAULT; | 
|---|
| 4012 | if (val < 0 || val > 1) | 
|---|
| 4013 | return -EINVAL; | 
|---|
| 4014 |  | 
|---|
| 4015 | WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val); | 
|---|
| 4016 | return 0; | 
|---|
| 4017 | } | 
|---|
| 4018 | case PACKET_TX_HAS_OFF: | 
|---|
| 4019 | { | 
|---|
| 4020 | unsigned int val; | 
|---|
| 4021 |  | 
|---|
| 4022 | if (optlen != sizeof(val)) | 
|---|
| 4023 | return -EINVAL; | 
|---|
| 4024 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 4025 | return -EFAULT; | 
|---|
| 4026 |  | 
|---|
| 4027 | lock_sock(sk); | 
|---|
| 4028 | if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec) | 
|---|
| 4029 | packet_sock_flag_set(po, flag: PACKET_SOCK_TX_HAS_OFF, val); | 
|---|
| 4030 |  | 
|---|
| 4031 | release_sock(sk); | 
|---|
| 4032 | return 0; | 
|---|
| 4033 | } | 
|---|
| 4034 | case PACKET_QDISC_BYPASS: | 
|---|
| 4035 | { | 
|---|
| 4036 | int val; | 
|---|
| 4037 |  | 
|---|
| 4038 | if (optlen != sizeof(val)) | 
|---|
| 4039 | return -EINVAL; | 
|---|
| 4040 | if (copy_from_sockptr(dst: &val, src: optval, size: sizeof(val))) | 
|---|
| 4041 | return -EFAULT; | 
|---|
| 4042 |  | 
|---|
| 4043 | packet_sock_flag_set(po, flag: PACKET_SOCK_QDISC_BYPASS, val); | 
|---|
| 4044 | return 0; | 
|---|
| 4045 | } | 
|---|
| 4046 | default: | 
|---|
| 4047 | return -ENOPROTOOPT; | 
|---|
| 4048 | } | 
|---|
| 4049 | } | 
|---|
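/* Editorial sketch (not part of the original source): the PACKET_VERSION
 * case above returns -EBUSY once a ring exists, so version selection must
 * precede PACKET_RX_RING. A minimal TPACKET_V3 setup, with illustrative
 * block/frame geometry:
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static int setup_v3_rx_ring(int fd)
{
	int ver = TPACKET_V3;
	struct tpacket_req3 req;

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)))
		return -1;

	memset(&req, 0, sizeof(req));
	req.tp_block_size	= 1 << 22;
	req.tp_block_nr		= 64;
	req.tp_frame_size	= 1 << 11;
	req.tp_frame_nr		= (req.tp_block_size / req.tp_frame_size) *
				  req.tp_block_nr;
	req.tp_retire_blk_tov	= 60;	/* block timeout, ms */

	return setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
}
#endif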

static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val, lv = sizeof(val);
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data = &val;
	union tpacket_stats_u st;
	struct tpacket_rollover_stats rstats;
	int drops;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		memcpy(&st, &po->stats, sizeof(st));
		memset(&po->stats, 0, sizeof(po->stats));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		drops = atomic_xchg(&po->tp_drops, 0);

		if (po->tp_version == TPACKET_V3) {
			lv = sizeof(struct tpacket_stats_v3);
			st.stats3.tp_drops = drops;
			st.stats3.tp_packets += drops;
			data = &st.stats3;
		} else {
			lv = sizeof(struct tpacket_stats);
			st.stats1.tp_drops = drops;
			st.stats1.tp_packets += drops;
			data = &st.stats1;
		}

		break;
	case PACKET_AUXDATA:
		val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
		break;
	case PACKET_ORIGDEV:
		val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
		break;
	case PACKET_VNET_HDR:
		val = !!READ_ONCE(po->vnet_hdr_sz);
		break;
	case PACKET_VNET_HDR_SZ:
		val = READ_ONCE(po->vnet_hdr_sz);
		break;
	case PACKET_COPY_THRESH:
		val = READ_ONCE(pkt_sk(sk)->copy_thresh);
		break;
	case PACKET_VERSION:
		val = po->tp_version;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (len < sizeof(int))
			return -EINVAL;
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		case TPACKET_V3:
			val = sizeof(struct tpacket3_hdr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case PACKET_RESERVE:
		val = po->tp_reserve;
		break;
	case PACKET_LOSS:
		val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS);
		break;
	case PACKET_TIMESTAMP:
		val = READ_ONCE(po->tp_tstamp);
		break;
	case PACKET_FANOUT:
		val = (po->fanout ?
		       ((u32)po->fanout->id |
			((u32)po->fanout->type << 16) |
			((u32)po->fanout->flags << 24)) :
		       0);
		break;
	case PACKET_IGNORE_OUTGOING:
		val = READ_ONCE(po->prot_hook.ignore_outgoing);
		break;
	case PACKET_ROLLOVER_STATS:
		if (!po->rollover)
			return -EINVAL;
		rstats.tp_all = atomic_long_read(&po->rollover->num);
		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
		data = &rstats;
		lv = sizeof(rstats);
		break;
	case PACKET_TX_HAS_OFF:
		val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
		break;
	case PACKET_QDISC_BYPASS:
		val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
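
/*
 * Example (userspace illustration, not part of this file): reading the
 * counters served by PACKET_STATISTICS above. The kernel zeroes its
 * copy on every read, so each call returns the delta since the last
 * one. A TPACKET_V1/V2 socket is assumed; V3 uses tpacket_stats_v3.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (!getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len))
 *		printf("received %u, dropped %u\n",
 *		       st.tp_packets, st.tp_drops);
 */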

static int packet_notifier(struct notifier_block *this,
			   unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct packet_mclist *ml, *tmp;
	LIST_HEAD(mclist);
	struct sock *sk;

	rcu_read_lock();
	sk_for_each_rcu(sk, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist_delete(dev, &po->mclist,
							 &mclist);
			fallthrough;

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
					__unregister_prot_hook(sk, false);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					packet_cached_dev_reset(po);
					WRITE_ONCE(po->ifindex, -1);
					netdev_put(po->prot_hook.dev,
						   &po->prot_hook.dev_tracker);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num)
					register_prot_hook(sk);
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();

	/* packet_dev_mc might grab instance locks so can't run under rcu */
	list_for_each_entry_safe(ml, tmp, &mclist, remove_list) {
		packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}

	return NOTIFY_DONE;
}
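
/*
 * Illustration (not part of this file): once NETDEV_DOWN is handled
 * above, sk->sk_err is set and a blocked reader observes the error
 * once. handle_link_down() is an assumed application hook.
 *
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
 *
 *	if (n < 0 && errno == ENETDOWN)
 *		handle_link_down();
 */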


static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
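
/*
 * Example (userspace illustration, not part of this file): SIOCINQ as
 * handled above reports the size of the *next* queued packet, not the
 * total queue occupancy, so it can be used to size a receive buffer.
 *
 *	int next_len = 0;
 *
 *	if (!ioctl(fd, SIOCINQ, &next_len) && next_len > 0)
 *		printf("next packet is %d bytes\n", next_len);
 */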

static __poll_t packet_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	__poll_t mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
					      TP_STATUS_KERNEL))
			mask |= EPOLLIN | EPOLLRDNORM;
	}
	packet_rcv_try_clear_pressure(po);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
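
/*
 * Example (userspace illustration, not part of this file): a ring
 * reader blocks in poll() and relies on the EPOLLIN raised above once
 * a frame has left TP_STATUS_KERNEL. frame(), consume() and frame_nr
 * are assumed helpers from the application's own ring setup.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		struct tpacket2_hdr *hdr = frame(ring, idx);
 *
 *		while (hdr->tp_status & TP_STATUS_USER) {
 *			consume(hdr);
 *			hdr->tp_status = TP_STATUS_KERNEL;
 *			idx = (idx + 1) % frame_nr;
 *			hdr = frame(ring, idx);
 *		}
 *	}
 */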


/* Dirty? Well, I still have not found a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_long_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_long_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	=	packet_mm_open,
	.close	=	packet_mm_close,
};

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
	if (buffer)
		return buffer;

	/* vmalloc failed, let's dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
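
/*
 * Worked example (illustration only): with 4 KiB pages, a request of
 * tp_block_size = 64 KiB gives get_order() = 4, so each pg_vec[i]
 * buffer above is one physically contiguous 16-page allocation (or a
 * vmalloc'ed span of the same size via the fallback path).
 */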

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
			   int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long *rx_owner_map = NULL;
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;
	/* Added to avoid minimal code churn */
	struct tpacket_req *req = &req_u->req;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_long_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		unsigned int min_frame_size;

		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		min_frame_size = po->tp_hdrlen + po->tp_reserve;
		if (po->tp_version >= TPACKET_V3 &&
		    req->tp_block_size <
		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
			goto out;
		if (unlikely(req->tp_frame_size < min_frame_size))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Block transmit is not supported yet */
			if (!tx_ring) {
				init_prb_bdqc(po, rb, pg_vec, req_u);
			} else {
				struct tpacket_req3 *req3 = &req_u->req3;

				if (req3->tp_retire_blk_tov ||
				    req3->tp_sizeof_priv ||
				    req3->tp_feature_req_word) {
					err = -EINVAL;
					goto out_free_pg_vec;
				}
			}
			break;
		default:
			if (!tx_ring) {
				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
				if (!rx_owner_map)
					goto out_free_pg_vec;
			}
			break;
		}
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
	num = po->num;
	WRITE_ONCE(po->num, 0);
	if (was_running)
		__unregister_prot_hook(sk, false);
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_long_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		if (po->tp_version <= TPACKET_V2)
			swap(rb->rx_owner_map, rx_owner_map);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_long_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %ld\n",
			       atomic_long_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	WRITE_ONCE(po->num, num);
	if (was_running)
		register_prot_hook(sk);
	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

out_free_pg_vec:
	if (pg_vec) {
		bitmap_free(rx_owner_map);
		free_pg_vec(pg_vec, order, req->tp_block_nr);
	}
out:
	return err;
}
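
/*
 * Example (userspace illustration, not part of this file): the
 * setsockopt() that lands in packet_set_ring() above. The geometry
 * must pass the sanity checks: a page-aligned block size, frames a
 * multiple of TPACKET_ALIGNMENT, and tp_frame_nr equal to
 * frames_per_block * tp_block_nr. The numbers below are arbitrary but
 * mutually consistent.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size	= 1 << 16,	// 64 KiB, page aligned
 *		.tp_block_nr	= 4,
 *		.tp_frame_size	= 1 << 11,	// 2 KiB -> 32 frames/block
 *		.tp_frame_nr	= 4 * 32,	// blocks * frames_per_block
 *	};
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)))
 *		perror("PACKET_RX_RING");
 */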

static int packet_mmap(struct file *file, struct socket *sock,
		       struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_long_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
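
/*
 * Example (userspace illustration, not part of this file): mapping the
 * ring once PACKET_RX_RING has succeeded. The offset must be 0 and the
 * length must cover the whole ring, matching the expected_size check
 * above; "req" is the tpacket_req passed to setsockopt().
 *
 *	size_t len = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 *	if (ring == MAP_FAILED)
 *		perror("mmap");
 */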

static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};
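
/*
 * Example (userspace illustration, not part of this file): creating a
 * socket of this family goes through packet_family_ops.create, i.e.
 * packet_create(); CAP_NET_RAW is required.
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *	if (fd < 0)
 *		perror("socket(AF_PACKET)");
 */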

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(READ_ONCE(po->num)),
			   READ_ONCE(po->ifindex),
			   packet_sock_flag(po, PACKET_SOCK_RUNNING),
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sk_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};


static void __exit packet_exit(void)
{
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
}

static int __init packet_init(void)
{
	int rc;

	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;
	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out_notifier;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;

	return 0;

out_proto:
	proto_unregister(&packet_proto);
out_notifier:
	unregister_netdevice_notifier(&packet_netdev_notifier);
out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_DESCRIPTION("Packet socket support (AF_PACKET)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);