/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors: Mina Almasry <almasrymina@google.com>
 *          Willem de Bruijn <willemb@google.com>
 *          Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>
#include <net/netdev_netlink.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;
	/* Protect dev */
	struct mutex lock;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. The page_pool does not release the
	 * ref until all the net_iovs allocated from this binding are released
	 * back to the page_pool.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used. net_iovs allocated from this binding
	 * that are stuck in the TX path for any reason (such as awaiting
	 * retransmits) hold a reference to the binding until the skb holding
	 * them is freed.
	 */
	refcount_t ref;

	/* Node in the list of currently active bindings. Used by netlink to
	 * notify us of the user dropping the bind.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique among all currently active
	 * bindings.
	 */
	u32 id;

	/* DMA direction: DMA_FROM_DEVICE for an Rx binding, DMA_TO_DEVICE
	 * for Tx.
	 */
	enum dma_data_direction direction;

	/* Array of net_iov pointers for this binding, sorted by virtual
	 * address. This array makes it convenient to map virtual addresses
	 * to net_iovs in the TX path.
	 */
	struct net_iov **tx_vec;

	struct work_struct unbind_w;
};

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk and needs
 * this owner struct to track the metadata necessary to satisfy allocations
 * from that chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};

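/* Illustrative sketch (not a helper declared in this header): given the
 * chunk owner, the DMA address of a net_iov follows from its index,
 * assuming one PAGE_SIZE'd area per net_iov as in net_iov_virtual_addr()
 * below:
 *
 *	owner = net_devmem_iov_to_chunk_owner(niov);
 *	dma_addr = owner->base_dma_addr +
 *		   ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
 */
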
void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       struct device *dma_dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack);
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void net_devmem_bind_tx_release(struct sock *sk);

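/* Illustrative sketch of the Rx control flow these declarations imply,
 * driven from the netlink layer; error handling is elided and the local
 * variable names are hypothetical:
 *
 *	binding = net_devmem_bind_dmabuf(dev, dma_dev, DMA_FROM_DEVICE,
 *					 dmabuf_fd, priv, extack);
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *	...
 *	net_devmem_unbind_dmabuf(binding);
 */
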
static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}

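/* For example, with 4K pages and owner->base_virtual == 0, the net_iov at
 * index 3 sits at virtual address 3 << PAGE_SHIFT == 0x3000. These are the
 * per-binding virtual addresses that the TX path maps back to net_iovs
 * via tx_vec (see net_devmem_get_niov_at() below).
 */
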
static inline bool
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	return refcount_inc_not_zero(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
	schedule_work(&binding->unbind_w);
}

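/* Illustrative sketch of the refcount pairing described above the ref
 * member: a user that may outlive the netlink-held reference takes its
 * own ref before using the binding and drops it when done. The
 * surrounding code and the error choice are hypothetical:
 *
 *	if (!net_devmem_dmabuf_binding_get(binding))
 *		return -ENODEV;		(binding already torn down)
 *	... use net_iovs from the binding ...
 *	net_devmem_dmabuf_binding_put(binding);
 *
 * The final put schedules __net_devmem_dmabuf_binding_free() on a
 * workqueue, so teardown never runs in the caller's context.
 */
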
void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);
struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id);
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size);

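/* Illustrative sketch of how a TX-path caller might walk a user-supplied
 * (addr, len) range with net_devmem_get_niov_at(), which returns the
 * net_iov covering addr along with the offset into it and the size
 * remaining; the loop below is a simplified assumption, not the actual
 * TX code:
 *
 *	while (len) {
 *		niov = net_devmem_get_niov_at(binding, addr, &off, &size);
 *		if (!niov)
 *			break;
 *		chunk = min(len, size);
 *		... attach niov at offset off for chunk bytes ...
 *		addr += chunk;
 *		len -= chunk;
 *	}
 */
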
#else
struct net_devmem_dmabuf_binding;

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}

static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}

static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       struct device *dma_dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd,
		       struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	return NULL;
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size)
{
	return NULL;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return NULL;
}
#endif

#endif /* _NET_DEVMEM_H */