/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(const struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(const struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

static inline void kunmap(const struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

static inline void *kmap_local_page(const struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_page_try_from_panic(const struct page *page)
{
	if (!PageHighMem(page))
		return page_address(page);
	/* If the page is in HighMem, it's not safe to kmap it. */
	return NULL;
}

static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
{
	const struct page *page = folio_page(folio, offset / PAGE_SIZE);
	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}

static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

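/*
 * The atomic kmap variants below are implemented on top of the kmap_local
 * infrastructure. In addition to establishing the mapping, they disable
 * pagefaults and either migration (on PREEMPT_RT) or preemption, which
 * __kunmap_atomic() re-enables in reverse order.
 */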
static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(const struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

unsigned long __nr_free_highpages(void);
unsigned long __totalhigh_pages(void);

static inline unsigned long nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return __totalhigh_pages();
}

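/*
 * True if @x lies in the persistent kmap (PKMAP) area or in the fixmap
 * window used for kmap_local_page()/kmap_atomic() slots. Fixmap addresses
 * grow downwards, hence the FIX_KMAP_END/FIX_KMAP_BEGIN ordering.
 */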
static inline bool is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
		(addr >= __fix_to_virt(FIX_KMAP_END) &&
		 addr < __fix_to_virt(FIX_KMAP_BEGIN));
}
#else /* CONFIG_HIGHMEM */

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(const struct page *page) { }
static inline void kmap_flush_unused(void) { }

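/*
 * Even without highmem, architectures that define ARCH_HAS_FLUSH_ON_KUNMAP
 * need their caches flushed when a kernel mapping of a page is torn down.
 */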
static inline void kunmap(const struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(const struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_page_try_from_panic(const struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
{
	return folio_address(folio) + offset;
}

static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}

static inline void *kmap_atomic(const struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

static inline unsigned long nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0; }

static inline bool is_kmap_addr(const void *x)
{
	return false;
}

#endif /* CONFIG_HIGHMEM */

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr: Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on the PREEMPT_RT configuration, it also re-enables
 * either migration or preemption. Users should not count on these side
 * effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)

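/*
 * Illustrative sketch only (the page pointer is assumed to come from the
 * caller): a typical legacy pairing of kmap_atomic() and kunmap_atomic()
 * looks like
 *
 *	void *vaddr = kmap_atomic(page);
 *
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 *
 * New code should prefer kmap_local_page()/kunmap_local() instead.
 */
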
/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the page mapped
 *
 * @__addr can be any address within the mapped page. Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping. See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)

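/*
 * Illustrative sketch only (page, buffer, offset and len are assumed to be
 * provided by the caller, with offset + len within the page): the preferred
 * local mapping pairing looks like
 *
 *	void *vaddr = kmap_local_page(page);
 *
 *	memcpy(buffer, vaddr + offset, len);
 *	kunmap_local(vaddr);
 *
 * Since kunmap_local() accepts any address within the mapped page, passing
 * "vaddr + offset" would work as well.
 */
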
#endif