/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xdp

#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XDP_H

#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/tracepoint.h>
#include <linux/bpf.h>
#include <net/xdp.h>

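/*
 * X-macro table of XDP verdict codes. __XDP_ACT_MAP() applies FN to each
 * action so the list is written only once: __XDP_ACT_TP_FN exports the
 * enum values to user space via TRACE_DEFINE_ENUM(), and __XDP_ACT_SYM_TAB
 * expands to the { value, "name" } pairs that __print_symbolic() consumes,
 * i.e. { XDP_ABORTED, "ABORTED" }, { XDP_DROP, "DROP" }, ..., { -1, NULL }.
 */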
#define __XDP_ACT_MAP(FN)	\
	FN(ABORTED)		\
	FN(DROP)		\
	FN(PASS)		\
	FN(TX)			\
	FN(REDIRECT)

#define __XDP_ACT_TP_FN(x)	\
	TRACE_DEFINE_ENUM(XDP_##x);
#define __XDP_ACT_SYM_FN(x)	\
	{ XDP_##x, #x },
#define __XDP_ACT_SYM_TAB	\
	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)

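/*
 * Fired from a driver's XDP fast path when a program returns XDP_ABORTED
 * or an unknown/unsupported verdict. A minimal sketch of typical driver
 * usage (exact code varies per driver):
 *
 *	act = bpf_prog_run_xdp(prog, &xdp);
 *	switch (act) {
 *	...
 *	default:
 *		trace_xdp_exception(netdev, prog, act);
 *		fallthrough;
 *	case XDP_DROP:
 *		...
 *	}
 */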
TRACE_EVENT(xdp_exception,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp, u32 act),

	TP_ARGS(dev, xdp, act),

	TP_STRUCT__entry(
		__field(int, prog_id)
		__field(u32, act)
		__field(int, ifindex)
	),

	TP_fast_assign(
		__entry->prog_id = xdp->aux->id;
		__entry->act = act;
		__entry->ifindex = dev->ifindex;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex)
);

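/*
 * Records a bulk XDP_TX flush: how many frames a driver transmitted and
 * dropped in one batch on @dev, plus any error code from the flush.
 */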
TRACE_EVENT(xdp_bulk_tx,

	TP_PROTO(const struct net_device *dev,
		 int sent, int drops, int err),

	TP_ARGS(dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, ifindex)
		__field(u32, act)
		__field(int, drops)
		__field(int, sent)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->ifindex = dev->ifindex;
		__entry->act = XDP_TX;
		__entry->drops = drops;
		__entry->sent = sent;
		__entry->err = err;
	),

	TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
		  __entry->ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops, __entry->err)
);

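/*
 * Mirror of the leading member of struct bpf_dtab_netdev (private to
 * kernel/bpf/devmap.c), so the redirect tracepoints can read the target
 * ifindex without pulling in BPF-internal headers.
 */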
#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
struct _bpf_dtab_netdev {
	struct net_device *dev;
};
#endif /* __DEVMAP_OBJ_TYPE */

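/*
 * Common template for xdp_redirect and xdp_redirect_err. @tgt is the map
 * entry redirected to (NULL for a devmap broadcast), and a map_type of
 * BPF_MAP_TYPE_UNSPEC with map_id == INT_MAX marks a plain non-map
 * redirect, where @index carries the destination ifindex instead.
 */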
DECLARE_EVENT_CLASS(xdp_redirect_template,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),

	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),

	TP_STRUCT__entry(
		__field(int, prog_id)
		__field(u32, act)
		__field(int, ifindex)
		__field(int, err)
		__field(int, to_ifindex)
		__field(u32, map_id)
		__field(int, map_index)
	),

	TP_fast_assign(
		u32 ifindex = 0, map_index = index;

		if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
			/* Leave to_ifindex as 0 on a broadcast redirect,
			 * where tgt is NULL.
			 */
			if (tgt)
				ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
		} else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
			ifindex = index;
			map_index = 0;
		}

		__entry->prog_id = xdp->aux->id;
		__entry->act = XDP_REDIRECT;
		__entry->ifindex = dev->ifindex;
		__entry->err = err;
		__entry->to_ifindex = ifindex;
		__entry->map_id = map_id;
		__entry->map_index = map_index;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
		  " map_id=%d map_index=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex, __entry->to_ifindex,
		  __entry->err, __entry->map_id, __entry->map_index)
);

DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

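/*
 * Convenience wrappers for the core redirect code. The non-map variants
 * pass BPF_MAP_TYPE_UNSPEC with the INT_MAX map_id sentinel decoded by the
 * template above, so @to is reported as to_ifindex rather than as a map
 * index.
 */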
#define _trace_xdp_redirect(dev, xdp, to)				\
	 trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_err(dev, xdp, to, err)			\
	 trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index) \
	 trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)

#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
	 trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)

#ifdef CONFIG_BPF_SYSCALL
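/*
 * Per-batch stats from the cpumap kthread that runs XDP programs on the
 * remote CPU: frames processed, frames dropped, whether the kthread was
 * rescheduled, and the verdicts of any second-stage XDP program.
 */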
TRACE_EVENT(xdp_cpumap_kthread,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int sched, struct xdp_cpumap_stats *xdp_stats),

	TP_ARGS(map_id, processed, drops, sched, xdp_stats),

	TP_STRUCT__entry(
		__field(int, map_id)
		__field(u32, act)
		__field(int, cpu)
		__field(unsigned int, drops)
		__field(unsigned int, processed)
		__field(int, sched)
		__field(unsigned int, xdp_pass)
		__field(unsigned int, xdp_drop)
		__field(unsigned int, xdp_redirect)
	),

	TP_fast_assign(
		__entry->map_id = map_id;
		__entry->act = XDP_REDIRECT;
		__entry->cpu = smp_processor_id();
		__entry->drops = drops;
		__entry->processed = processed;
		__entry->sched = sched;
		__entry->xdp_pass = xdp_stats->pass;
		__entry->xdp_drop = xdp_stats->drop;
		__entry->xdp_redirect = xdp_stats->redirect;
	),

	TP_printk("kthread"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " sched=%d"
		  " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->sched,
		  __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
);

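/*
 * Producer side of the cpumap: frames enqueued from the receiving CPU
 * towards @to_cpu, with drops counting frames that did not fit in the
 * per-CPU ring.
 */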
TRACE_EVENT(xdp_cpumap_enqueue,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int to_cpu),

	TP_ARGS(map_id, processed, drops, to_cpu),

	TP_STRUCT__entry(
		__field(int, map_id)
		__field(u32, act)
		__field(int, cpu)
		__field(unsigned int, drops)
		__field(unsigned int, processed)
		__field(int, to_cpu)
	),

	TP_fast_assign(
		__entry->map_id = map_id;
		__entry->act = XDP_REDIRECT;
		__entry->cpu = smp_processor_id();
		__entry->drops = drops;
		__entry->processed = processed;
		__entry->to_cpu = to_cpu;
	),

	TP_printk("enqueue"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " to_cpu=%d",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->to_cpu)
);

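/*
 * Result of an ndo_xdp_xmit() bulk send from @from_dev to @to_dev during
 * a devmap redirect flush: frames sent, frames dropped by the target
 * driver, and the return code.
 */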
TRACE_EVENT(xdp_devmap_xmit,

	TP_PROTO(const struct net_device *from_dev,
		 const struct net_device *to_dev,
		 int sent, int drops, int err),

	TP_ARGS(from_dev, to_dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, from_ifindex)
		__field(u32, act)
		__field(int, to_ifindex)
		__field(int, drops)
		__field(int, sent)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->from_ifindex = from_dev->ifindex;
		__entry->act = XDP_REDIRECT;
		__entry->to_ifindex = to_dev->ifindex;
		__entry->drops = drops;
		__entry->sent = sent;
		__entry->err = err;
	),

	TP_printk("ndo_xdp_xmit"
		  " from_ifindex=%d to_ifindex=%d action=%s"
		  " sent=%d drops=%d"
		  " err=%d",
		  __entry->from_ifindex, __entry->to_ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops,
		  __entry->err)
);
#endif /* CONFIG_BPF_SYSCALL */

/* Users are expected to have included <net/xdp.h> already, but not xdp_priv.h */
#include <net/xdp_priv.h>

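/*
 * Symbolic names for enum xdp_mem_type (net/xdp.h), built with the same
 * X-macro pattern as the action table above.
 */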
#define __MEM_TYPE_MAP(FN)	\
	FN(PAGE_SHARED)		\
	FN(PAGE_ORDER0)		\
	FN(PAGE_POOL)		\
	FN(XSK_BUFF_POOL)

#define __MEM_TYPE_TP_FN(x)	\
	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
#define __MEM_TYPE_SYM_FN(x)	\
	{ MEM_TYPE_##x, #x },
#define __MEM_TYPE_SYM_TAB	\
	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)

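/*
 * mem_connect/mem_disconnect track the lifetime of an xdp_mem_allocator
 * (e.g. a page_pool): connect fires when an allocator is registered for
 * an RX queue, disconnect when it is unregistered and may be torn down.
 */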
TRACE_EVENT(mem_disconnect,

	TP_PROTO(const struct xdp_mem_allocator *xa),

	TP_ARGS(xa),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *, xa)
		__field(u32, mem_id)
		__field(u32, mem_type)
		__field(const void *, allocator)
	),

	TP_fast_assign(
		__entry->xa = xa;
		__entry->mem_id = xa->mem.id;
		__entry->mem_type = xa->mem.type;
		__entry->allocator = xa->allocator;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator
	)
);

TRACE_EVENT(mem_connect,

	TP_PROTO(const struct xdp_mem_allocator *xa,
		 const struct xdp_rxq_info *rxq),

	TP_ARGS(xa, rxq),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *, xa)
		__field(u32, mem_id)
		__field(u32, mem_type)
		__field(const void *, allocator)
		__field(const struct xdp_rxq_info *, rxq)
		__field(int, ifindex)
	),

	TP_fast_assign(
		__entry->xa = xa;
		__entry->mem_id = xa->mem.id;
		__entry->mem_type = xa->mem.type;
		__entry->allocator = xa->allocator;
		__entry->rxq = rxq;
		__entry->ifindex = rxq->dev->ifindex;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p"
		  " ifindex=%d",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator,
		  __entry->ifindex
	)
);

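/*
 * Records the extack error message when attaching an XDP program through
 * a bpf_link fails, so the text remains observable where there is no
 * netlink caller to deliver it to.
 */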
TRACE_EVENT(bpf_xdp_link_attach_failed,

	TP_PROTO(const char *msg),

	TP_ARGS(msg),

	TP_STRUCT__entry(
		__string(msg, msg)
	),

	TP_fast_assign(
		__assign_str(msg);
	),

	TP_printk("errmsg=%s", __get_str(msg))
);

#endif /* _TRACE_XDP_H */

#include <trace/define_trace.h>