/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM power

#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWER_H

#include <linux/cpufreq.h>
#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#define TPS(x)  tracepoint_string(x)
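
/*
 * TPS() wraps tracepoint_string(), which is generally used so that constant
 * strings handed to tracepoints (for example the action names recorded by
 * the suspend_resume event below) end up in the tracing string table and can
 * be resolved back to text by userspace tools instead of showing up as raw
 * kernel addresses.
 */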

DECLARE_EVENT_CLASS(cpu,

	TP_PROTO(unsigned int state, unsigned int cpu_id),

	TP_ARGS(state, cpu_id),

	TP_STRUCT__entry(
		__field(u32, state)
		__field(u32, cpu_id)
	),

	TP_fast_assign(
		__entry->state = state;
		__entry->cpu_id = cpu_id;
	),

	TP_printk("state=%lu cpu_id=%lu", (unsigned long)__entry->state,
		  (unsigned long)__entry->cpu_id)
);

DEFINE_EVENT(cpu, cpu_idle,

	TP_PROTO(unsigned int state, unsigned int cpu_id),

	TP_ARGS(state, cpu_id)
);
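
/*
 * cpu_idle is normally emitted in pairs around a low-power entry: once with
 * the selected state index and once with PWR_EVENT_EXIT (defined below) when
 * the CPU leaves the state.  A rough sketch of a typical call site
 * (illustrative only, not the exact core code):
 *
 *	trace_cpu_idle(state_index, smp_processor_id());
 *	... enter the idle state ...
 *	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 */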

TRACE_EVENT(cpu_idle_miss,

	TP_PROTO(unsigned int cpu_id, unsigned int state, bool below),

	TP_ARGS(cpu_id, state, below),

	TP_STRUCT__entry(
		__field(u32, cpu_id)
		__field(u32, state)
		__field(bool, below)
	),

	TP_fast_assign(
		__entry->cpu_id = cpu_id;
		__entry->state = state;
		__entry->below = below;
	),

	TP_printk("cpu_id=%lu state=%lu type=%s", (unsigned long)__entry->cpu_id,
		  (unsigned long)__entry->state, (__entry->below) ? "below" : "above")
);
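
/*
 * cpu_idle_miss is reported when, in hindsight, the idle state chosen by the
 * cpuidle governor did not match the observed sleep length; "type" gives the
 * direction of the mismatch.  It is informational only: the tracepoint itself
 * does not influence state selection.
 */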

#ifdef CONFIG_ARM_PSCI_CPUIDLE
DECLARE_EVENT_CLASS(psci_domain_idle,

	TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),

	TP_ARGS(cpu_id, state, s2idle),

	TP_STRUCT__entry(
		__field(u32, cpu_id)
		__field(u32, state)
		__field(bool, s2idle)
	),

	TP_fast_assign(
		__entry->cpu_id = cpu_id;
		__entry->state = state;
		__entry->s2idle = s2idle;
	),

	TP_printk("cpu_id=%lu state=0x%lx is_s2idle=%s",
		  (unsigned long)__entry->cpu_id, (unsigned long)__entry->state,
		  (__entry->s2idle) ? "yes" : "no")
);

DEFINE_EVENT(psci_domain_idle, psci_domain_idle_enter,

	TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),

	TP_ARGS(cpu_id, state, s2idle)
);

DEFINE_EVENT(psci_domain_idle, psci_domain_idle_exit,

	TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),

	TP_ARGS(cpu_id, state, s2idle)
);
#endif /* CONFIG_ARM_PSCI_CPUIDLE */

TRACE_EVENT(pstate_sample,

	TP_PROTO(u32 core_busy,
		 u32 scaled_busy,
		 u32 from,
		 u32 to,
		 u64 mperf,
		 u64 aperf,
		 u64 tsc,
		 u32 freq,
		 u32 io_boost
		 ),

	TP_ARGS(core_busy,
		scaled_busy,
		from,
		to,
		mperf,
		aperf,
		tsc,
		freq,
		io_boost
		),

	TP_STRUCT__entry(
		__field(u32, core_busy)
		__field(u32, scaled_busy)
		__field(u32, from)
		__field(u32, to)
		__field(u64, mperf)
		__field(u64, aperf)
		__field(u64, tsc)
		__field(u32, freq)
		__field(u32, io_boost)
	),

	TP_fast_assign(
		__entry->core_busy = core_busy;
		__entry->scaled_busy = scaled_busy;
		__entry->from = from;
		__entry->to = to;
		__entry->mperf = mperf;
		__entry->aperf = aperf;
		__entry->tsc = tsc;
		__entry->freq = freq;
		__entry->io_boost = io_boost;
	),

	TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu io_boost=%lu",
		  (unsigned long)__entry->core_busy,
		  (unsigned long)__entry->scaled_busy,
		  (unsigned long)__entry->from,
		  (unsigned long)__entry->to,
		  (unsigned long long)__entry->mperf,
		  (unsigned long long)__entry->aperf,
		  (unsigned long long)__entry->tsc,
		  (unsigned long)__entry->freq,
		  (unsigned long)__entry->io_boost
		  )
);
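
/*
 * pstate_sample is meant for P-state scaling drivers (intel_pstate being the
 * usual producer) to record the raw inputs of a frequency decision: the
 * APERF/MPERF/TSC counter deltas, the busy estimates derived from them, and
 * the P-states selected before ("from") and after ("to") the sample.  The
 * exact meaning of each field is driver-specific.
 */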

/*
 * This file can get included multiple times (TRACE_HEADER_MULTI_READ at the
 * top), so plain macro definitions need their own guard against redefinition.
 */
#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING
#define _PWR_EVENT_AVOID_DOUBLE_DEFINING

#define PWR_EVENT_EXIT -1
#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING */
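
/*
 * PWR_EVENT_EXIT is passed as the "state" argument of the cpu event class
 * (e.g. cpu_idle) to flag the exit path, since valid state indices and
 * frequencies are non-negative.
 */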

#define pm_verb_symbolic(event) \
	__print_symbolic(event, \
		{ PM_EVENT_SUSPEND, "suspend" }, \
		{ PM_EVENT_RESUME, "resume" }, \
		{ PM_EVENT_FREEZE, "freeze" }, \
		{ PM_EVENT_QUIESCE, "quiesce" }, \
		{ PM_EVENT_HIBERNATE, "hibernate" }, \
		{ PM_EVENT_THAW, "thaw" }, \
		{ PM_EVENT_RESTORE, "restore" }, \
		{ PM_EVENT_RECOVER, "recover" })

DEFINE_EVENT(cpu, cpu_frequency,

	TP_PROTO(unsigned int frequency, unsigned int cpu_id),

	TP_ARGS(frequency, cpu_id)
);
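
/*
 * cpu_frequency reuses the cpu event class with the new frequency (in kHz)
 * as the "state" value.  It is typically emitted once a frequency transition
 * has completed, roughly like (illustrative sketch, not the exact call site):
 *
 *	trace_cpu_frequency(freqs->new, policy->cpu);
 */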

TRACE_EVENT(cpu_frequency_limits,

	TP_PROTO(struct cpufreq_policy *policy),

	TP_ARGS(policy),

	TP_STRUCT__entry(
		__field(u32, min_freq)
		__field(u32, max_freq)
		__field(u32, cpu_id)
	),

	TP_fast_assign(
		__entry->min_freq = policy->min;
		__entry->max_freq = policy->max;
		__entry->cpu_id = policy->cpu;
	),

	TP_printk("min=%lu max=%lu cpu_id=%lu",
		  (unsigned long)__entry->min_freq,
		  (unsigned long)__entry->max_freq,
		  (unsigned long)__entry->cpu_id)
);
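
/*
 * cpu_frequency_limits records the effective min/max frequency (in kHz) of a
 * cpufreq policy, typically when the policy limits are re-evaluated, e.g.
 * after a governor change or an update of scaling_min_freq/scaling_max_freq.
 */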

#ifdef CONFIG_PM_SLEEP
TRACE_EVENT(device_pm_callback_start,

	TP_PROTO(struct device *dev, const char *pm_ops, int event),

	TP_ARGS(dev, pm_ops, event),

	TP_STRUCT__entry(
		__string(device, dev_name(dev))
		__string(driver, dev_driver_string(dev))
		__string(parent, dev->parent ? dev_name(dev->parent) : "none")
		__string(pm_ops, pm_ops ? pm_ops : "none ")
		__field(int, event)
	),

	TP_fast_assign(
		__assign_str(device);
		__assign_str(driver);
		__assign_str(parent);
		__assign_str(pm_ops);
		__entry->event = event;
	),

	TP_printk("%s %s, parent: %s, %s[%s]", __get_str(driver),
		  __get_str(device), __get_str(parent), __get_str(pm_ops),
		  pm_verb_symbolic(__entry->event))
);

TRACE_EVENT(device_pm_callback_end,

	TP_PROTO(struct device *dev, int error),

	TP_ARGS(dev, error),

	TP_STRUCT__entry(
		__string(device, dev_name(dev))
		__string(driver, dev_driver_string(dev))
		__field(int, error)
	),

	TP_fast_assign(
		__assign_str(device);
		__assign_str(driver);
		__entry->error = error;
	),

	TP_printk("%s %s, err=%d",
		  __get_str(driver), __get_str(device), __entry->error)
);
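
/*
 * The device_pm_callback_start/end pair is intended to bracket each device
 * PM callback invoked during system suspend/resume so that the time spent
 * per device and per callback can be measured.  A rough sketch of the
 * pattern (illustrative, not the exact core code):
 *
 *	trace_device_pm_callback_start(dev, info, state.event);
 *	error = callback(dev);
 *	trace_device_pm_callback_end(dev, error);
 */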
#endif /* CONFIG_PM_SLEEP */

TRACE_EVENT(suspend_resume,

	TP_PROTO(const char *action, int val, bool start),

	TP_ARGS(action, val, start),

	TP_STRUCT__entry(
		__field(const char *, action)
		__field(int, val)
		__field(bool, start)
	),

	TP_fast_assign(
		__entry->action = action;
		__entry->val = val;
		__entry->start = start;
	),

	TP_printk("%s[%u] %s", __entry->action, (unsigned int)__entry->val,
		  (__entry->start) ? "begin" : "end")
);
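
/*
 * suspend_resume marks the begin/end of a named phase of the system
 * suspend/resume sequence.  The action string is expected to be a constant
 * wrapped in TPS() so it can be decoded by tools; a typical call pair might
 * look like (illustrative sketch):
 *
 *	trace_suspend_resume(TPS("suspend_enter"), state, true);
 *	...
 *	trace_suspend_resume(TPS("suspend_enter"), state, false);
 */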

DECLARE_EVENT_CLASS(wakeup_source,

	TP_PROTO(const char *name, unsigned int state),

	TP_ARGS(name, state),

	TP_STRUCT__entry(
		__string(name, name)
		__field(u64, state)
	),

	TP_fast_assign(
		__assign_str(name);
		__entry->state = state;
	),

	TP_printk("%s state=0x%lx", __get_str(name),
		  (unsigned long)__entry->state)
);

DEFINE_EVENT(wakeup_source, wakeup_source_activate,

	TP_PROTO(const char *name, unsigned int state),

	TP_ARGS(name, state)
);

DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,

	TP_PROTO(const char *name, unsigned int state),

	TP_ARGS(name, state)
);
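
/*
 * wakeup_source_activate/deactivate fire when a wakeup source starts or
 * stops blocking system suspend.  The "state" value is an opaque counter
 * supplied by the wakeup framework (a packed combined event count in the
 * in-tree caller), mainly useful for pairing activate/deactivate records.
 */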

#ifdef CONFIG_ARCH_OMAP2PLUS
/*
 * The power domain events are used for power domain transitions
 */
DECLARE_EVENT_CLASS(power_domain,

	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),

	TP_ARGS(name, state, cpu_id),

	TP_STRUCT__entry(
		__string(name, name)
		__field(u64, state)
		__field(u64, cpu_id)
	),

	TP_fast_assign(
		__assign_str(name);
		__entry->state = state;
		__entry->cpu_id = cpu_id;
	),

	TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
		  (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
);

DEFINE_EVENT(power_domain, power_domain_target,

	TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),

	TP_ARGS(name, state, cpu_id)
);
#endif /* CONFIG_ARCH_OMAP2PLUS */

/*
 * CPU latency QoS events used for global CPU latency QoS list updates
 */
DECLARE_EVENT_CLASS(cpu_latency_qos_request,

	TP_PROTO(s32 value),

	TP_ARGS(value),

	TP_STRUCT__entry(
		__field(s32, value)
	),

	TP_fast_assign(
		__entry->value = value;
	),

	TP_printk("CPU_DMA_LATENCY value=%d",
		  __entry->value)
);

DEFINE_EVENT(cpu_latency_qos_request, pm_qos_add_request,

	TP_PROTO(s32 value),

	TP_ARGS(value)
);

DEFINE_EVENT(cpu_latency_qos_request, pm_qos_update_request,

	TP_PROTO(s32 value),

	TP_ARGS(value)
);

DEFINE_EVENT(cpu_latency_qos_request, pm_qos_remove_request,

	TP_PROTO(s32 value),

	TP_ARGS(value)
);
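
/*
 * The pm_qos_{add,update,remove}_request events trace updates of the global
 * CPU latency QoS list; the value is the requested latency bound in
 * microseconds.  They would typically be emitted from the corresponding
 * cpu_latency_qos_*_request() helpers, e.g. (illustrative sketch):
 *
 *	trace_pm_qos_add_request(value);
 */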

/*
 * General PM QoS events used for updates of PM QoS request lists
 */
DECLARE_EVENT_CLASS(pm_qos_update,

	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),

	TP_ARGS(action, prev_value, curr_value),

	TP_STRUCT__entry(
		__field(enum pm_qos_req_action, action)
		__field(int, prev_value)
		__field(int, curr_value)
	),

	TP_fast_assign(
		__entry->action = action;
		__entry->prev_value = prev_value;
		__entry->curr_value = curr_value;
	),

	TP_printk("action=%s prev_value=%d curr_value=%d",
		  __print_symbolic(__entry->action,
			  { PM_QOS_ADD_REQ, "ADD_REQ" },
			  { PM_QOS_UPDATE_REQ, "UPDATE_REQ" },
			  { PM_QOS_REMOVE_REQ, "REMOVE_REQ" }),
		  __entry->prev_value, __entry->curr_value)
);

DEFINE_EVENT(pm_qos_update, pm_qos_update_target,

	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),

	TP_ARGS(action, prev_value, curr_value)
);

DEFINE_EVENT_PRINT(pm_qos_update, pm_qos_update_flags,

	TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),

	TP_ARGS(action, prev_value, curr_value),

	TP_printk("action=%s prev_value=0x%x curr_value=0x%x",
		  __print_symbolic(__entry->action,
			  { PM_QOS_ADD_REQ, "ADD_REQ" },
			  { PM_QOS_UPDATE_REQ, "UPDATE_REQ" },
			  { PM_QOS_REMOVE_REQ, "REMOVE_REQ" }),
		  __entry->prev_value, __entry->curr_value)
);
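
/*
 * pm_qos_update_flags reuses the pm_qos_update class via DEFINE_EVENT_PRINT,
 * which keeps the class's field layout and assignment but overrides only the
 * output format, here to print the flag values in hex rather than decimal.
 */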

DECLARE_EVENT_CLASS(dev_pm_qos_request,

	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),

	TP_ARGS(name, type, new_value),

	TP_STRUCT__entry(
		__string(name, name)
		__field(enum dev_pm_qos_req_type, type)
		__field(s32, new_value)
	),

	TP_fast_assign(
		__assign_str(name);
		__entry->type = type;
		__entry->new_value = new_value;
	),

	TP_printk("device=%s type=%s new_value=%d",
		  __get_str(name),
		  __print_symbolic(__entry->type,
			  { DEV_PM_QOS_RESUME_LATENCY, "DEV_PM_QOS_RESUME_LATENCY" },
			  { DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }),
		  __entry->new_value)
);

DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_add_request,

	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),

	TP_ARGS(name, type, new_value)
);

DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_update_request,

	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),

	TP_ARGS(name, type, new_value)
);

DEFINE_EVENT(dev_pm_qos_request, dev_pm_qos_remove_request,

	TP_PROTO(const char *name, enum dev_pm_qos_req_type type,
		 s32 new_value),

	TP_ARGS(name, type, new_value)
);
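
/*
 * The dev_pm_qos_*_request events mirror the CPU latency QoS events above,
 * but for per-device PM QoS requests: "name" identifies the device and
 * "type" says which kind of constraint is being changed.  Only the request
 * types listed in the symbolic table above are decoded by name; any other
 * type would be printed as a raw number.
 */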

TRACE_EVENT(guest_halt_poll_ns,

	TP_PROTO(bool grow, unsigned int new, unsigned int old),

	TP_ARGS(grow, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow = grow;
		__entry->new = new;
		__entry->old = old;
	),

	TP_printk("halt_poll_ns %u (%s %u)",
		  __entry->new,
		  __entry->grow ? "grow" : "shrink",
		  __entry->old)
);
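
/*
 * guest_halt_poll_ns records adjustments of the guest-side halt-polling
 * window (in nanoseconds).  The helper macros below are the intended entry
 * points; code growing the window would use, roughly (illustrative sketch):
 *
 *	trace_guest_halt_poll_ns_grow(new_ns, old_ns);
 */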

#define trace_guest_halt_poll_ns_grow(new, old) \
	trace_guest_halt_poll_ns(true, new, old)
#define trace_guest_halt_poll_ns_shrink(new, old) \
	trace_guest_halt_poll_ns(false, new, old)
#endif /* _TRACE_POWER_H */

/* This part must be outside protection */
#include <trace/define_trace.h>