/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tick related global functions
 */
#ifndef _LINUX_TICK_H
#define _LINUX_TICK_H

#include <linux/clockchips.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/context_tracking_state.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/static_key.h>

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void __init tick_init(void);
/* Should be core only, but ARM BL switcher requires it */
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU)
extern int tick_cpu_dying(unsigned int cpu);
extern void tick_assert_timekeeping_handover(void);
#else
#define tick_cpu_dying	NULL
static inline void tick_assert_timekeeping_handover(void) { }
#endif

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
extern void tick_freeze(void);
extern void tick_unfreeze(void);
#else
static inline void tick_freeze(void) { }
static inline void tick_unfreeze(void) { }
#endif

#ifdef CONFIG_TICK_ONESHOT
extern void tick_irq_enter(void);
#  ifndef arch_needs_cpu
#   define arch_needs_cpu() (0)
#  endif
# else
static inline void tick_irq_enter(void) { }
#endif
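
/*
 * Note (illustrative sketch, not an override defined in this header): an
 * architecture may supply its own arch_needs_cpu() before this header is
 * included; the nohz idle code treats a non-zero return as "this CPU still
 * has pending work, do not defer the tick".  A minimal example of such an
 * override, with pending_arch_work() purely hypothetical:
 *
 *	#define arch_needs_cpu()	(pending_arch_work())
 */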

#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
#else
static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
#endif

enum tick_broadcast_mode {
	TICK_BROADCAST_OFF,
	TICK_BROADCAST_ON,
	TICK_BROADCAST_FORCE,
};

enum tick_broadcast_state {
	TICK_BROADCAST_EXIT,
	TICK_BROADCAST_ENTER,
};

extern struct static_key_false arch_needs_tick_broadcast;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern void tick_broadcast_control(enum tick_broadcast_mode mode);
#else
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */

#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return 0;
}
#endif

static inline void tick_broadcast_enable(void)
{
	tick_broadcast_control(TICK_BROADCAST_ON);
}
static inline void tick_broadcast_disable(void)
{
	tick_broadcast_control(TICK_BROADCAST_OFF);
}
static inline void tick_broadcast_force(void)
{
	tick_broadcast_control(TICK_BROADCAST_FORCE);
}
static inline int tick_broadcast_enter(void)
{
	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}
static inline void tick_broadcast_exit(void)
{
	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}
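
/*
 * Usage sketch (illustrative only): an idle driver whose CPU-local timer
 * stops in a deep idle state typically brackets the deep-idle entry with
 * tick_broadcast_enter()/tick_broadcast_exit() and falls back to a shallower
 * state when broadcast is not possible.  enter_deep_idle() and
 * enter_shallow_idle() below are hypothetical placeholders:
 *
 *	if (tick_broadcast_enter()) {
 *		// Broadcast unavailable: pick a state that keeps
 *		// the local timer ticking.
 *		enter_shallow_idle();
 *	} else {
 *		enter_deep_idle();
 *		tick_broadcast_exit();
 *	}
 */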

enum tick_dep_bits {
	TICK_DEP_BIT_POSIX_TIMER	= 0,
	TICK_DEP_BIT_PERF_EVENTS	= 1,
	TICK_DEP_BIT_SCHED		= 2,
	TICK_DEP_BIT_CLOCK_UNSTABLE	= 3,
	TICK_DEP_BIT_RCU		= 4,
	TICK_DEP_BIT_RCU_EXP		= 5
};
#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP

#define TICK_DEP_MASK_NONE		0
#define TICK_DEP_MASK_POSIX_TIMER	(1 << TICK_DEP_BIT_POSIX_TIMER)
#define TICK_DEP_MASK_PERF_EVENTS	(1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED		(1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE	(1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
#define TICK_DEP_MASK_RCU		(1 << TICK_DEP_BIT_RCU)
#define TICK_DEP_MASK_RCU_EXP		(1 << TICK_DEP_BIT_RCU_EXP)

#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
extern bool tick_nohz_tick_stopped(void);
extern bool tick_nohz_tick_stopped_cpu(int cpu);
extern void tick_nohz_idle_stop_tick(void);
extern void tick_nohz_idle_retain_tick(void);
extern void tick_nohz_idle_restart_tick(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern bool tick_nohz_idle_got_tick(void);
extern ktime_t tick_nohz_get_next_hrtimer(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
static inline void tick_nohz_idle_stop_tick(void) { }
static inline void tick_nohz_idle_retain_tick(void) { }
static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
static inline bool tick_nohz_idle_got_tick(void) { return false; }
static inline ktime_t tick_nohz_get_next_hrtimer(void)
{
	/* Next wake up is the tick period, assume it starts now */
	return ktime_add(ktime_get(), TICK_NSEC);
}
static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
	*delta_next = TICK_NSEC;
	return *delta_next;
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
#endif /* !CONFIG_NO_HZ_COMMON */

/*
 * Mask of CPUs that are nohz_full.
 *
 * Users should be guarded by CONFIG_NO_HZ_FULL or a tick_nohz_full_cpu()
 * check.
 */
extern cpumask_var_t tick_nohz_full_mask;
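
/*
 * Access sketch (illustrative only): the mask is only populated when
 * nohz_full is in use, so walk it behind a tick_nohz_full_enabled() (or
 * tick_nohz_full_cpu()) check.  do_something() is a hypothetical helper:
 *
 *	if (tick_nohz_full_enabled())
 *		for_each_cpu(cpu, tick_nohz_full_mask)
 *			do_something(cpu);
 */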

#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;

static inline bool tick_nohz_full_enabled(void)
{
	if (!context_tracking_enabled())
		return false;

	return tick_nohz_full_running;
}

/*
 * Check if a CPU is part of the nohz_full subset. Arrange for evaluating
 * the cpu expression (typically smp_processor_id()) _after_ the static
 * key.
 */
#define tick_nohz_full_cpu(_cpu) ({					\
	bool __ret = false;						\
	if (tick_nohz_full_enabled())					\
		__ret = cpumask_test_cpu((_cpu), tick_nohz_full_mask);	\
	__ret;								\
})

extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit);
extern void tick_nohz_dep_set_task(struct task_struct *tsk,
				   enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
				     enum tick_dep_bits bit);
extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
				       enum tick_dep_bits bit);
extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);

/*
 * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
 * on top of static keys.
 */
static inline void tick_dep_set(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set(bit);
}

static inline void tick_dep_clear(enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear(bit);
}

static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_set_cpu(cpu, bit);
}

static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	if (tick_nohz_full_cpu(cpu))
		tick_nohz_dep_clear_cpu(cpu, bit);
}

static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_task(tsk, bit);
}

static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_task(tsk, bit);
}

static inline void tick_dep_init_task(struct task_struct *tsk)
{
	atomic_set(&tsk->tick_dep_mask, 0);
}

static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_set_signal(tsk, bit);
}
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit)
{
	if (tick_nohz_full_enabled())
		tick_nohz_dep_clear_signal(signal, bit);
}
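
/*
 * Usage sketch (illustrative only): a subsystem that temporarily needs the
 * periodic tick on a nohz_full CPU pairs the set/clear wrappers around the
 * work that depends on the tick, here using TICK_DEP_BIT_PERF_EVENTS as an
 * example bit:
 *
 *	tick_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *	// ... work that relies on the tick firing on @cpu ...
 *	tick_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *
 * The wrappers are no-ops unless @cpu is nohz_full, so callers need not
 * check tick_nohz_full_cpu() themselves.
 */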

extern void tick_nohz_full_kick_cpu(int cpu);
extern void __tick_nohz_task_switch(void);
extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }

static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }

static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_dep_set_task(struct task_struct *tsk,
				     enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_init_task(struct task_struct *tsk) { }
static inline void tick_dep_set_signal(struct task_struct *tsk,
				       enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
					 enum tick_dep_bits bit) { }

static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void __tick_nohz_task_switch(void) { }
static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { }
#endif

static inline void tick_nohz_task_switch(void)
{
	if (tick_nohz_full_enabled())
		__tick_nohz_task_switch();
}

static inline void tick_nohz_user_enter_prepare(void)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		rcu_nocb_flush_deferred_wakeup();
}

#endif