/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq_vectors

#if !defined(_TRACE_IRQ_VECTORS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IRQ_VECTORS_H

#include <linux/tracepoint.h>

#ifdef CONFIG_X86_LOCAL_APIC

DECLARE_EVENT_CLASS(x86_irq_vector,

	TP_PROTO(int vector),

	TP_ARGS(vector),

	TP_STRUCT__entry(
		__field(	int,	vector	)
	),

	TP_fast_assign(
		__entry->vector = vector;
	),

	TP_printk("vector=%d", __entry->vector) );
#define DEFINE_IRQ_VECTOR_EVENT(name)				\
DEFINE_EVENT_FN(x86_irq_vector, name##_entry,			\
	TP_PROTO(int vector),					\
	TP_ARGS(vector), NULL, NULL);				\
DEFINE_EVENT_FN(x86_irq_vector, name##_exit,			\
	TP_PROTO(int vector),					\
	TP_ARGS(vector), NULL, NULL);
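
/*
 * Usage sketch (illustrative only, hedged): each DEFINE_IRQ_VECTOR_EVENT(name)
 * below generates a trace_<name>_entry()/trace_<name>_exit() pair which the
 * corresponding vector handler brackets its work with, roughly:
 *
 *	trace_local_timer_entry(LOCAL_TIMER_VECTOR);
 *	local_apic_timer_interrupt();
 *	trace_local_timer_exit(LOCAL_TIMER_VECTOR);
 */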

/*
 * local_timer - called when entering/exiting a local timer interrupt
 * vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(local_timer);

/*
 * spurious_apic - called when entering/exiting a spurious apic vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(spurious_apic);

/*
 * error_apic - called when entering/exiting an error apic vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(error_apic);

/*
 * x86_platform_ipi - called when entering/exiting an x86 platform ipi
 * interrupt vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);

#ifdef CONFIG_IRQ_WORK
/*
 * irq_work - called when entering/exiting an irq work interrupt
 * vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(irq_work);

/*
 * We must disallow sampling irq_work_exit() because perf event sampling
 * itself can cause irq_work, which would lead to an infinite loop:
 *
 * 1) irq_work_exit happens
 * 2) generates perf sample
 * 3) generates irq_work
 * 4) goto 1
 */
TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
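
/*
 * In practice this permission hook only rejects sampling use of the
 * tracepoint: counting it (e.g. perf stat -e irq_vectors:irq_work_exit)
 * still works, while attaching a sampling event (e.g. perf record on the
 * same tracepoint) fails with -EPERM.
 */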
#endif

/*
 * The ifdef is required because that tracepoint macro hell emits tracepoint
 * code in files which include this header even if the tracepoint is not
 * enabled. Brilliant stuff that.
 */
#ifdef CONFIG_SMP
/*
 * reschedule - called when entering/exiting a reschedule vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(reschedule);

/*
 * call_function - called when entering/exiting a call function interrupt
 * vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(call_function);

/*
 * call_function_single - called when entering/exiting a call function
 * single interrupt vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(call_function_single);
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
/*
 * threshold_apic - called when entering/exiting a threshold apic interrupt
 * vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(threshold_apic);
#endif

#ifdef CONFIG_X86_MCE_AMD
/*
 * deferred_error_apic - called when entering/exiting a deferred error apic
 * interrupt vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(deferred_error_apic);
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
/*
 * thermal_apic - called when entering/exiting a thermal apic interrupt
 * vector handler
 */
DEFINE_IRQ_VECTOR_EVENT(thermal_apic);
#endif
TRACE_EVENT(vector_config,

	TP_PROTO(unsigned int irq, unsigned int vector,
		 unsigned int cpu, unsigned int apicdest),

	TP_ARGS(irq, vector, cpu, apicdest),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
		__field(	unsigned int,	vector		)
		__field(	unsigned int,	cpu		)
		__field(	unsigned int,	apicdest	)
	),

	TP_fast_assign(
		__entry->irq = irq;
		__entry->vector = vector;
		__entry->cpu = cpu;
		__entry->apicdest = apicdest;
	),

	TP_printk("irq=%u vector=%u cpu=%u apicdest=0x%08x",
		  __entry->irq, __entry->vector, __entry->cpu,
		  __entry->apicdest)
);
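
/*
 * Illustrative sketch only: the vector management code in
 * arch/x86/kernel/apic/vector.c fires this tracepoint through the generated
 * trace_vector_config() whenever an interrupt's vector/target is
 * (re)programmed, roughly as
 *
 *	trace_vector_config(irqd->irq, vector, cpu, dest_apicid);
 */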

DECLARE_EVENT_CLASS(vector_mod,

	TP_PROTO(unsigned int irq, unsigned int vector,
		 unsigned int cpu, unsigned int prev_vector,
		 unsigned int prev_cpu),

	TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
		__field(	unsigned int,	vector		)
		__field(	unsigned int,	cpu		)
		__field(	unsigned int,	prev_vector	)
		__field(	unsigned int,	prev_cpu	)
	),

	TP_fast_assign(
		__entry->irq = irq;
		__entry->vector = vector;
		__entry->cpu = cpu;
		__entry->prev_vector = prev_vector;
		__entry->prev_cpu = prev_cpu;
	),

	TP_printk("irq=%u vector=%u cpu=%u prev_vector=%u prev_cpu=%u",
		  __entry->irq, __entry->vector, __entry->cpu,
		  __entry->prev_vector, __entry->prev_cpu)
);

#define DEFINE_IRQ_VECTOR_MOD_EVENT(name)			\
DEFINE_EVENT_FN(vector_mod, name,				\
	TP_PROTO(unsigned int irq, unsigned int vector,		\
		 unsigned int cpu, unsigned int prev_vector,	\
		 unsigned int prev_cpu),			\
	TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL);

DEFINE_IRQ_VECTOR_MOD_EVENT(vector_update);
DEFINE_IRQ_VECTOR_MOD_EVENT(vector_clear);
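
/*
 * Rough semantics, as used by the vector management code: vector_update
 * records a move of an interrupt to a new vector and/or target CPU, with
 * prev_vector/prev_cpu holding the previous assignment; vector_clear records
 * an assignment being torn down.
 */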

DECLARE_EVENT_CLASS(vector_reserve,

	TP_PROTO(unsigned int irq, int ret),

	TP_ARGS(irq, ret),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq	)
		__field(	int,		ret	)
	),

	TP_fast_assign(
		__entry->irq = irq;
		__entry->ret = ret;
	),

	TP_printk("irq=%u ret=%d", __entry->irq, __entry->ret)
);

#define DEFINE_IRQ_VECTOR_RESERVE_EVENT(name)			\
DEFINE_EVENT_FN(vector_reserve, name,				\
	TP_PROTO(unsigned int irq, int ret),			\
	TP_ARGS(irq, ret), NULL, NULL);

DEFINE_IRQ_VECTOR_RESERVE_EVENT(vector_reserve_managed);
DEFINE_IRQ_VECTOR_RESERVE_EVENT(vector_reserve);
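
/*
 * Note on the two allocation events below: on failure the recorded vector is
 * forced to 0 and positive return values are recorded as 0, so a successful
 * allocation always shows up as ret=0 and a failed one as vector=0 with a
 * negative ret.
 */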
TRACE_EVENT(vector_alloc,

	TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
		 int ret),

	TP_ARGS(irq, vector, reserved, ret),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
		__field(	unsigned int,	vector		)
		__field(	bool,		reserved	)
		__field(	int,		ret		)
	),

	TP_fast_assign(
		__entry->irq = irq;
		__entry->vector = ret < 0 ? 0 : vector;
		__entry->reserved = reserved;
		__entry->ret = ret > 0 ? 0 : ret;
	),

	TP_printk("irq=%u vector=%u reserved=%d ret=%d",
		  __entry->irq, __entry->vector,
		  __entry->reserved, __entry->ret)
);

TRACE_EVENT(vector_alloc_managed,

	TP_PROTO(unsigned int irq, unsigned int vector,
		 int ret),

	TP_ARGS(irq, vector, ret),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq	)
		__field(	unsigned int,	vector	)
		__field(	int,		ret	)
	),

	TP_fast_assign(
		__entry->irq = irq;
		__entry->vector = ret < 0 ? 0 : vector;
		__entry->ret = ret > 0 ? 0 : ret;
	),

	TP_printk("irq=%u vector=%u ret=%d",
		  __entry->irq, __entry->vector, __entry->ret)
);

DECLARE_EVENT_CLASS(vector_activate,

	TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve,
		 bool reserve),

	TP_ARGS(irq, is_managed, can_reserve, reserve),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
		__field(	bool,		is_managed	)
		__field(	bool,		can_reserve	)
		__field(	bool,		reserve		)
	),

	TP_fast_assign(
		__entry->irq = irq;
		__entry->is_managed = is_managed;
		__entry->can_reserve = can_reserve;
		__entry->reserve = reserve;
	),

	TP_printk("irq=%u is_managed=%d can_reserve=%d reserve=%d",
		  __entry->irq, __entry->is_managed, __entry->can_reserve,
		  __entry->reserve)
);

#define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name)			\
DEFINE_EVENT_FN(vector_activate, name,				\
	TP_PROTO(unsigned int irq, bool is_managed,		\
		 bool can_reserve, bool reserve),		\
	TP_ARGS(irq, is_managed, can_reserve, reserve), NULL, NULL);

DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate);
DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);

TRACE_EVENT(vector_teardown,

	TP_PROTO(unsigned int irq, bool is_managed, bool has_reserved),

	TP_ARGS(irq, is_managed, has_reserved),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
		__field(	bool,		is_managed	)
		__field(	bool,		has_reserved	)
	),

	TP_fast_assign(
		__entry->irq = irq;
		__entry->is_managed = is_managed;
		__entry->has_reserved = has_reserved;
	),

	TP_printk("irq=%u is_managed=%d has_reserved=%d",
		  __entry->irq, __entry->is_managed, __entry->has_reserved)
);

TRACE_EVENT(vector_setup,

	TP_PROTO(unsigned int irq, bool is_legacy, int ret),

	TP_ARGS(irq, is_legacy, ret),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
		__field(	bool,		is_legacy	)
		__field(	int,		ret		)
	),

	TP_fast_assign(
		__entry->irq = irq;
		__entry->is_legacy = is_legacy;
		__entry->ret = ret;
	),

	TP_printk("irq=%u is_legacy=%d ret=%d",
		  __entry->irq, __entry->is_legacy, __entry->ret)
);

TRACE_EVENT(vector_free_moved,

	TP_PROTO(unsigned int irq, unsigned int cpu, unsigned int vector,
		 bool is_managed),

	TP_ARGS(irq, cpu, vector, is_managed),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
		__field(	unsigned int,	cpu		)
		__field(	unsigned int,	vector		)
		__field(	bool,		is_managed	)
	),

	TP_fast_assign(
		__entry->irq = irq;
		__entry->cpu = cpu;
		__entry->vector = vector;
		__entry->is_managed = is_managed;
	),

	TP_printk("irq=%u cpu=%u vector=%u is_managed=%d",
		  __entry->irq, __entry->cpu, __entry->vector,
		  __entry->is_managed)
);

#endif /* CONFIG_X86_LOCAL_APIC */

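/*
 * Redirect define_trace.h (included below, outside the multi-read
 * protection) so that it re-reads this header from the current include
 * path (".") under the name irq_vectors instead of the default
 * include/trace/events/ location; that second pass is what emits the
 * actual tracepoint code.
 */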
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE irq_vectors
#endif /* _TRACE_IRQ_VECTORS_H */

/* This part must be outside protection */
#include <trace/define_trace.h>