/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
 *
 * Copyright (C) 2020 Paul E. McKenney.
 */

#ifndef __LINUX_RCUPDATE_TRACE_H
#define __LINUX_RCUPDATE_TRACE_H

#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/cleanup.h>

extern struct lockdep_map rcu_trace_lock_map;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline int rcu_read_lock_trace_held(void)
{
	return lock_is_held(&rcu_trace_lock_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int rcu_read_lock_trace_held(void)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
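
/*
 * Example (a minimal sketch; my_trace_handler() is hypothetical):
 * code that must execute within an RCU-trace reader can assert as
 * much via lockdep.  With CONFIG_DEBUG_LOCK_ALLOC=n the helper above
 * unconditionally returns 1, so the assertion compiles to a no-op.
 *
 *	static void my_trace_handler(void)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(),
 *				 "my_trace_handler() outside RCU-trace reader");
 *		// ... access data protected by RCU Tasks Trace ...
 *	}
 */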

#ifdef CONFIG_TASKS_TRACE_RCU

void rcu_read_unlock_trace_special(struct task_struct *t);

/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked on
 * one task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	// Marked accesses: the nesting count is also read remotely.
	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
	barrier();	// Order nesting update before the critical section.
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers
	rcu_lock_acquire(&rcu_trace_lock_map);
}

/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed.  Invoking rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
	int nesting;
	struct task_struct *t = current;

	rcu_lock_release(&rcu_trace_lock_map);
	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
	barrier(); // Critical section before disabling.
	// Disable IPI-based setting of .need_qs.
	WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return;  // We assume shallow reader nesting.
	}
	WARN_ON_ONCE(nesting != 0);
	rcu_read_unlock_trace_special(t);
}
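
/*
 * Example (a minimal sketch; gp, struct my_data, and do_something_with()
 * are hypothetical): a basic RCU-trace read-side critical section.  Note
 * the use of rcu_read_lock_trace_held() to keep lockdep informed:
 *
 *	struct my_data *p;
 *
 *	rcu_read_lock_trace();
 *	p = rcu_dereference_check(gp, rcu_read_lock_trace_held());
 *	if (p)
 *		do_something_with(p);	// p remains valid until unlock.
 *	rcu_read_unlock_trace();
 */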

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
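
/*
 * Example (a minimal sketch; gp, struct my_data, its ->rh rcu_head
 * field, and my_free_cb() are hypothetical): updater-side usage.
 *
 *	struct my_data *p = xchg(&gp, NULL);	// Unpublish the object.
 *
 *	// Either wait synchronously for all pre-existing readers...
 *	synchronize_rcu_tasks_trace();
 *	kfree(p);
 *
 *	// ...or defer the free to a callback instead.
 *	call_rcu_tasks_trace(&p->rh, my_free_cb);
 *
 * rcu_barrier_tasks_trace() waits until all previously queued
 * call_rcu_tasks_trace() callbacks have been invoked, for example
 * before unloading a module containing such callbacks.
 */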
#else
/*
 * The BPF JIT forms these addresses even when it doesn't call these
 * functions, so provide definitions that result in runtime errors.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

DEFINE_LOCK_GUARD_0(rcu_tasks_trace,
		    rcu_read_lock_trace(),
		    rcu_read_unlock_trace())
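
/*
 * Example (a minimal sketch; my_scoped_reader() is hypothetical): the
 * guard defined above enables scope-based readers via <linux/cleanup.h>,
 * with the matching unlock issued automatically at scope exit:
 *
 *	static void my_scoped_reader(void)
 *	{
 *		guard(rcu_tasks_trace)();
 *		// Read-side critical section: ends when scope exits.
 *	}
 */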

#endif /* __LINUX_RCUPDATE_TRACE_H */
