/* SPDX-License-Identifier: GPL-2.0 */
/*
 * page allocation tagging
 */
#ifndef _LINUX_PGALLOC_TAG_H
#define _LINUX_PGALLOC_TAG_H

#include <linux/alloc_tag.h>

#ifdef CONFIG_MEM_ALLOC_PROFILING

#include <linux/page_ext.h>

extern struct page_ext_operations page_alloc_tagging_ops;
extern unsigned long alloc_tag_ref_mask;
extern int alloc_tag_ref_offs;
extern struct alloc_tag_kernel_section kernel_tags;

DECLARE_STATIC_KEY_FALSE(mem_profiling_compressed);

typedef u16	pgalloc_tag_idx;

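/*
 * Depending on whether compressed tag references are in use, a page's tag
 * reference lives either in its page extension data or packed into bits of
 * page->flags. The handle below records where the reference was found so it
 * can later be updated or released.
 */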
union pgtag_ref_handle {
	union codetag_ref *ref;	/* reference in page extension */
	struct page *page;	/* reference in page flags */
};

/* Reserved indexes */
#define CODETAG_ID_NULL		0
#define CODETAG_ID_EMPTY	1
#define CODETAG_ID_FIRST	2

#ifdef CONFIG_MODULES

extern struct alloc_tag_module_section module_tags;

static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx)
{
	return &module_tags.first_tag[idx - kernel_tags.count];
}

static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag)
{
	return CODETAG_ID_FIRST + kernel_tags.count + (tag - module_tags.first_tag);
}

#else /* CONFIG_MODULES */

static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx)
{
	pr_warn("invalid page tag reference %lu\n", (unsigned long)idx);
	return NULL;
}

static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag)
{
	pr_warn("invalid page tag 0x%lx\n", (unsigned long)tag);
	return CODETAG_ID_NULL;
}

#endif /* CONFIG_MODULES */

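/*
 * Compressed references encode a tag as an index: CODETAG_ID_NULL means no
 * tag, CODETAG_ID_EMPTY marks an allocation deliberately left untagged, and
 * indexes from CODETAG_ID_FIRST onwards map to kernel tags first, followed by
 * module tags once (idx - CODETAG_ID_FIRST) reaches kernel_tags.count.
 */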
static inline void idx_to_ref(pgalloc_tag_idx idx, union codetag_ref *ref)
{
	switch (idx) {
	case (CODETAG_ID_NULL):
		ref->ct = NULL;
		break;
	case (CODETAG_ID_EMPTY):
		set_codetag_empty(ref);
		break;
	default:
		idx -= CODETAG_ID_FIRST;
		ref->ct = idx < kernel_tags.count ?
			&kernel_tags.first_tag[idx].ct :
			&module_idx_to_tag(idx)->ct;
		break;
	}
}

static inline pgalloc_tag_idx ref_to_idx(union codetag_ref *ref)
{
	struct alloc_tag *tag;

	if (!ref->ct)
		return CODETAG_ID_NULL;

	if (is_codetag_empty(ref))
		return CODETAG_ID_EMPTY;

	tag = ct_to_alloc_tag(ref->ct);
	if (tag >= kernel_tags.first_tag && tag < kernel_tags.first_tag + kernel_tags.count)
		return CODETAG_ID_FIRST + (tag - kernel_tags.first_tag);

	return module_tag_to_idx(tag);
}

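/*
 * Look up the tag reference for @page: @ref receives a snapshot of the
 * reference and @handle records where it lives, so the caller can later
 * modify it with update_page_tag_ref() and must release it with
 * put_page_tag_ref(). Returns false if no reference could be obtained.
 */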
/* Should be called only if mem_alloc_profiling_enabled() */
static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref,
				    union pgtag_ref_handle *handle)
{
	if (!page)
		return false;

	if (static_key_enabled(&mem_profiling_compressed)) {
		pgalloc_tag_idx idx;

		idx = (page->flags.f >> alloc_tag_ref_offs) & alloc_tag_ref_mask;
		idx_to_ref(idx, ref);
		handle->page = page;
	} else {
		struct page_ext *page_ext;
		union codetag_ref *tmp;

		page_ext = page_ext_get(page);
		if (!page_ext)
			return false;

		tmp = (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops);
		ref->ct = tmp->ct;
		handle->ref = tmp;
	}

	return true;
}

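/*
 * Release the reference obtained by get_page_tag_ref(). Only the page
 * extension case pins anything (via page_ext_get()); with compressed
 * references the index was read straight from page->flags and there is
 * nothing to drop.
 */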
static inline void put_page_tag_ref(union pgtag_ref_handle handle)
{
	if (WARN_ON(!handle.ref))
		return;

	if (!static_key_enabled(&mem_profiling_compressed))
		page_ext_put((void *)handle.ref - page_alloc_tagging_ops.offset);
}

static inline void update_page_tag_ref(union pgtag_ref_handle handle, union codetag_ref *ref)
{
	if (static_key_enabled(&mem_profiling_compressed)) {
		struct page *page = handle.page;
		unsigned long old_flags;
		unsigned long flags;
		unsigned long idx;

		if (WARN_ON(!page || !ref))
			return;

		idx = (unsigned long)ref_to_idx(ref);
		idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs;
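		/*
		 * Other bits in page->flags may change concurrently, so fold
		 * the new tag index in with a cmpxchg loop rather than a
		 * plain read-modify-write store.
		 */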
		do {
			old_flags = READ_ONCE(page->flags.f);
			flags = old_flags;
			flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs);
			flags |= idx;
		} while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
	} else {
		if (WARN_ON(!handle.ref || !ref))
			return;

		handle.ref->ct = ref->ct;
	}
}
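
/*
 * Illustrative sketch (not compiled here) of the lookup/update protocol,
 * roughly the pattern used by callers such as __clear_page_tag_ref():
 *
 *	union pgtag_ref_handle handle;
 *	union codetag_ref ref;
 *
 *	if (get_page_tag_ref(page, &ref, &handle)) {
 *		// inspect or modify ref, e.g. set_codetag_empty(&ref)
 *		update_page_tag_ref(handle, &ref);
 *		put_page_tag_ref(handle);
 *	}
 */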

/* Should be called only if mem_alloc_profiling_enabled() */
void __clear_page_tag_ref(struct page *page);

static inline void clear_page_tag_ref(struct page *page)
{
	if (mem_alloc_profiling_enabled())
		__clear_page_tag_ref(page);
}

/* Should be called only if mem_alloc_profiling_enabled() */
static inline struct alloc_tag *__pgalloc_tag_get(struct page *page)
{
	struct alloc_tag *tag = NULL;
	union pgtag_ref_handle handle;
	union codetag_ref ref;

	if (get_page_tag_ref(page, &ref, &handle)) {
		alloc_tag_sub_check(&ref);
		if (ref.ct)
			tag = ct_to_alloc_tag(ref.ct);
		put_page_tag_ref(handle);
	}

	return tag;
}

static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
{
	if (mem_alloc_profiling_enabled())
		return __pgalloc_tag_get(page);
	return NULL;
}

void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
void pgalloc_tag_swap(struct folio *new, struct folio *old);

void __init alloc_tag_sec_init(void);

#else /* CONFIG_MEM_ALLOC_PROFILING */

static inline void clear_page_tag_ref(struct page *page) {}
static inline void alloc_tag_sec_init(void) {}
static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}
static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#endif /* _LINUX_PGALLOC_TAG_H */