/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
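
/* Example: how a zero-copy driver typically drives the need_wakeup flags
 * at the end of an RX poll. This is an illustrative sketch only;
 * "failure" and ring_is_empty() stand in for hypothetical driver state:
 *
 *	if (xsk_uses_need_wakeup(pool)) {
 *		if (failure || ring_is_empty(rx_ring))
 *			xsk_set_rx_need_wakeup(pool);
 *		else
 *			xsk_clear_rx_need_wakeup(pool);
 *	}
 *
 * Setting the flag tells user space it must kick the kernel (e.g. via
 * poll() or sendto()) before more fill ring entries are consumed.
 */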

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}
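
/* Example: how the three helpers above relate when sizing RX buffers.
 * Illustrative numbers only, assuming a 4096-byte chunk, zero bytes of
 * pool->headroom and the default 256-byte XDP_PACKET_HEADROOM:
 *
 *	headroom = xsk_pool_get_headroom(pool);		// 256
 *	chunk	 = xsk_pool_get_chunk_size(pool);	// 4096
 *	rx_frame = xsk_pool_get_rx_frame_size(pool);	// 4096 - 256 = 3840
 *
 * A driver programs rx_frame as the maximum receive length in its RX
 * descriptors so the hardware never writes past the end of a chunk.
 */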

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}
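
/* Example: pool DMA lifecycle around a driver's XDP_SETUP_XSK_POOL
 * handler. A minimal sketch; error unwinding, queue quiescing and the
 * choice of DMA_ATTR_* flags for attrs are driver specific:
 *
 *	err = xsk_pool_dma_map(pool, dev, 0);
 *	if (err)
 *		return err;
 *
 * and on teardown, once the queue has been stopped:
 *
 *	xsk_pool_dma_unmap(pool, 0);
 */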

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}
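
/* Example: refilling an RX descriptor ring in batches. A sketch; the
 * rx_desc ring layout is a hypothetical driver detail:
 *
 *	struct xdp_buff *xdp[64];
 *	u32 nb, i;
 *
 *	nb = xsk_buff_alloc_batch(pool, xdp, 64);
 *	for (i = 0; i < nb; i++) {
 *		dma_addr_t dma = xsk_buff_xdp_get_dma(xdp[i]);
 *
 *		rx_desc[i].addr = cpu_to_le64(dma);
 *	}
 *
 * A return value below 64 means the fill ring could not supply more
 * buffers; the driver simply retries on the next poll.
 */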

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del(&pos->list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	const void *data = xdp->data;
	struct xdp_buff_xsk *frag;

	if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
				 offset_in_page(data), xdp->data_end - data,
				 xdp->frame_sz, false))
		return false;

	frag = container_of(xdp, struct xdp_buff_xsk, xdp);
	list_add_tail(&frag->list_node, &frag->pool->xskb_list);

	return true;
}
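
/* Example: assembling a multi-buffer frame on RX. A sketch; how the
 * end-of-packet (eop) bit is read and run_xdp() are hypothetical,
 * hardware-specific driver code:
 *
 *	if (!first)
 *		first = xdp;
 *	else if (!xsk_buff_add_frag(first, xdp))
 *		goto drop;
 *
 *	if (eop) {
 *		run_xdp(first);
 *		first = NULL;
 *	}
 *
 * On XDP_DROP or error, xsk_buff_free(first) releases the head buffer
 * and every frag that was linked in via xsk_buff_add_frag().
 */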

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, list_node);
	if (frag) {
		list_del(&frag->list_node);
		ret = &frag->xdp;
	}

	return ret;
}

static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_del(&xskb->list_node);
}

static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_first_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
				list_node);
	return &frag->xdp;
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}
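
/* Example: completing an RX descriptor before running XDP on it. A
 * sketch; "len" comes from a hypothetical hardware completion entry:
 *
 *	xsk_buff_set_size(xdp, len);
 *	xsk_buff_dma_sync_for_cpu(xdp);
 *	act = bpf_prog_run_xdp(prog, xdp);
 *
 * xsk_buff_set_size() resets data and data_meta to the default headroom
 * offset, so call it before writing any metadata in front of data.
 */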

/**
 * xsk_buff_raw_get_ctx - get &xdp_desc context
 * @pool: XSk buff pool the descriptor address belongs to
 * @addr: descriptor address (from userspace)
 *
 * Driver-facing wrapper for xp_raw_get_ctx(); see its kdoc for details.
 *
 * Return: new &xdp_desc_ctx struct containing the descriptor's DMA address
 * and metadata pointer, if the latter is present and valid (initialized to
 * %NULL otherwise).
 */
static inline struct xdp_desc_ctx
xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_ctx(pool, addr);
}
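
/* Example: a minimal zero-copy TX loop built on the raw helpers. A
 * sketch; hw_fill_tx_desc() and the ring bookkeeping are hypothetical:
 *
 *	struct xdp_desc desc;
 *
 *	while (xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *		hw_fill_tx_desc(tx_ring, dma, desc.len);
 *	}
 *	xsk_tx_release(pool);
 *
 * Once the hardware signals completion, the driver reports it back with
 * xsk_tx_completed(pool, nb_done) so the addresses return to user space
 * via the completion ring.
 */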

#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
		XDP_TXMD_FLAGS_LAUNCH_TIME | \
	0)

static inline bool
xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = data - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}

static inline struct xsk_tx_metadata *
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return __xsk_buff_get_metadata(pool, xp_raw_get_data(pool, addr));
}
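
/* Example: honouring TX metadata when building a hardware descriptor. A
 * sketch; hw_request_csum_offload() is a hypothetical driver hook:
 *
 *	struct xsk_tx_metadata *meta;
 *
 *	meta = xsk_buff_get_metadata(pool, desc.addr);
 *	if (meta && (meta->flags & XDP_TXMD_FLAGS_CHECKSUM))
 *		hw_request_csum_offload(tx_desc, meta->request.csum_start,
 *					meta->request.csum_offset);
 *
 * A NULL return means the socket was not created with a tx_metadata_len,
 * or the flags were invalid; the frame is then sent without offloads.
 */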

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

#else

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	return false;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
{
	return NULL;
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline struct xdp_desc_ctx
xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	return (struct xdp_desc_ctx){ };
}
static inline bool xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *
__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
	return NULL;
}

static inline struct xsk_tx_metadata *
xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */