/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

#include <uapi/linux/trace_mmap.h>

struct trace_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly, use functions below.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};
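
/*
 * Worked example of the data-record encoding above: a 12-byte payload
 * stored with a non-zero type_len has type_len = 12 >> 2 = 3, the
 * payload occupies array[0..2], and the total event size is the 4-byte
 * header word plus 12 bytes of data, i.e. 16 bytes.
 */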

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event);

/*
 * ring_buffer_discard_commit will remove an event that has not
 *   been committed yet. If this is used, then ring_buffer_unlock_commit
 *   must not be called on the discarded event. This function
 *   will try to remove the event from the ring buffer completely
 *   if another event has not been written after it.
 *
 * Example use:
 *
 *  if (some_condition)
 *    ring_buffer_discard_commit(buffer, event);
 *  else
 *    ring_buffer_unlock_commit(buffer);
 */
void ring_buffer_discard_commit(struct trace_buffer *buffer,
				struct ring_buffer_event *event);

/*
 * size is in bytes for each per CPU buffer.
 */
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
					       int order, unsigned long start,
					       unsigned long range_size,
					       unsigned long scratch_size,
					       struct lock_class_key *key);

void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
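
/*
 * Example use (a minimal sketch; the 1 MB size and the RB_FL_OVERWRITE
 * flag, declared further below, are arbitrary choices):
 *
 *	struct trace_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buffer);
 */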

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc_range(size, flags, order, start, range_size, s_size)	\
({									\
	static struct lock_class_key __key;				\
	__ring_buffer_alloc_range((size), (flags), (order), (start),	\
				  (range_size), (s_size), &__key);	\
})

typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
		     ring_buffer_cond_fn cond, void *data);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
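
/*
 * Example use (a minimal sketch; it is assumed here that a NULL @cond
 * means no extra wakeup condition and that @full is a percentage, so
 * 50 waits until the per-CPU buffer is at least half full):
 *
 *	ret = ring_buffer_wait(buffer, cpu, 50, NULL, NULL);
 */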

#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct trace_buffer *buffer);

int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer);
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length, void *data);
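
/*
 * Example write path (a minimal sketch; "my_event" is a hypothetical
 * payload and error handling is reduced to bailing out):
 *
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(my_event));
 *	if (!event)
 *		return -EBUSY;
 *	memcpy(ring_buffer_event_data(event), &my_event, sizeof(my_event));
 *	ring_buffer_unlock_commit(buffer);
 *
 * ring_buffer_write() performs the reserve, copy and commit in one call:
 *
 *	ring_buffer_write(buffer, sizeof(my_event), &my_event);
 */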

void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);

DEFINE_GUARD(ring_buffer_nest, struct trace_buffer *,
	     ring_buffer_nest_start(_T), ring_buffer_nest_end(_T))
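
/*
 * Example use (a minimal sketch; "note" is a hypothetical payload
 * written from within an existing write path):
 *
 *	ring_buffer_nest_start(buffer);
 *	ring_buffer_write(buffer, sizeof(note), &note);
 *	ring_buffer_nest_end(buffer);
 *
 * or, equivalently, with the scope-based guard defined above:
 *
 *	guard(ring_buffer_nest)(buffer);
 *	ring_buffer_write(buffer, sizeof(note), &note);
 */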

struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);
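
/*
 * Example consuming read (a minimal sketch; "cpu" and "process()" are
 * hypothetical and lost-event accounting is ignored):
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event), ts);
 */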

struct ring_buffer_iter *
ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
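
/*
 * Example non-consuming read (a minimal sketch; "cpu" and "process()"
 * are hypothetical):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return -ENOMEM;
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process(event, ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);
 */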

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif

bool ring_buffer_empty(struct trace_buffer *buffer);
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);

size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);

struct buffer_data_read_page;
struct buffer_data_read_page *
ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
				struct buffer_data_read_page *page);
int ring_buffer_read_page(struct trace_buffer *buffer,
			  struct buffer_data_read_page *data_page,
			  size_t len, int cpu, int full);
void *ring_buffer_read_page_data(struct buffer_data_read_page *page);
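
/*
 * Example use (a minimal sketch; it is assumed here that
 * ring_buffer_alloc_read_page() returns an ERR_PTR() on failure and
 * that the sub-buffer size is a suitable read length; "cpu" and
 * "parse()" are hypothetical):
 *
 *	struct buffer_data_read_page *page;
 *	int ret;
 *
 *	page = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	ret = ring_buffer_read_page(buffer, page,
 *				    ring_buffer_subbuf_size_get(buffer), cpu, 0);
 *	if (ret >= 0)
 *		parse(ring_buffer_read_page_data(page));
 *	ring_buffer_free_read_page(buffer, cpu, page);
 */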

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);

int ring_buffer_subbuf_order_get(struct trace_buffer *buffer);
int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
int ring_buffer_subbuf_size_get(struct trace_buffer *buffer);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif

int ring_buffer_map(struct trace_buffer *buffer, int cpu,
		    struct vm_area_struct *vma);
int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
#endif /* _LINUX_RING_BUFFER_H */