/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
#ifndef _LINUX_RSEQ_H
#define _LINUX_RSEQ_H

#ifdef CONFIG_RSEQ

#include <linux/preempt.h>
#include <linux/sched.h>

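/*
 * With CONFIG_MEMBARRIER, the rseq event mask can also be updated from
 * the membarrier IPI, so accesses have to be protected against
 * interrupts; otherwise disabling preemption is sufficient.
 */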
#ifdef CONFIG_MEMBARRIER
# define RSEQ_EVENT_GUARD	irq
#else
# define RSEQ_EVENT_GUARD	preempt
#endif

/*
 * Map the event mask on the user-space ABI enum rseq_cs_flags
 * for direct mask checks.
 */
enum rseq_event_mask_bits {
	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};

enum rseq_event_mask {
	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
};

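/*
 * Request rseq handling on return to user space by raising
 * TIF_NOTIFY_RESUME, but only if the task has registered an rseq area.
 */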
static inline void rseq_set_notify_resume(struct task_struct *t)
{
	if (t->rseq)
		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}

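/*
 * Invoked from the TIF_NOTIFY_RESUME path: updates the CPU fields in the
 * user-space rseq area and, if the task was interrupted inside an rseq
 * critical section, redirects it to the abort handler.
 */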
void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
	if (current->rseq)
		__rseq_handle_notify_resume(ksig, regs);
}

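/*
 * Called on signal delivery: record a signal event and handle any pending
 * rseq critical section before the signal frame is set up.
 */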
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
	scoped_guard(RSEQ_EVENT_GUARD)
		__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	rseq_handle_notify_resume(ksig, regs);
}

/* rseq_preempt() requires preemption to be disabled. */
static inline void rseq_preempt(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/* rseq_migrate() requires preemption to be disabled. */
static inline void rseq_migrate(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/*
 * If the parent process has a registered restartable sequences area, the
 * child inherits it. Unregister rseq for a clone with CLONE_VM set.
 */
static inline void rseq_fork(struct task_struct *t, u64 clone_flags)
{
	if (clone_flags & CLONE_VM) {
		t->rseq = NULL;
		t->rseq_len = 0;
		t->rseq_sig = 0;
		t->rseq_event_mask = 0;
	} else {
		t->rseq = current->rseq;
		t->rseq_len = current->rseq_len;
		t->rseq_sig = current->rseq_sig;
		t->rseq_event_mask = current->rseq_event_mask;
	}
}

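/*
 * Clear the rseq registration on execve(): the new program has to
 * register its own rseq area.
 */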
static inline void rseq_execve(struct task_struct *t)
{
	t->rseq = NULL;
	t->rseq_len = 0;
	t->rseq_sig = 0;
	t->rseq_event_mask = 0;
}

#else

static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, u64 clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}

#endif

#ifdef CONFIG_DEBUG_RSEQ

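/*
 * Debug aid: invoked on syscall exit to detect and terminate tasks that
 * issue a system call from within an rseq critical section.
 */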
void rseq_syscall(struct pt_regs *regs);

#else

static inline void rseq_syscall(struct pt_regs *regs)
{
}

#endif

#endif /* _LINUX_RSEQ_H */