/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page:	Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(const struct page *page);
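
/*
 * Illustrative sketch (not part of the API; "buf" and the surrounding
 * context are hypothetical): a typical long-term mapping.  Must be called
 * from preemptible task context since kmap() may sleep:
 *
 *	void *vaddr = kmap(page);
 *
 *	// vaddr is globally visible and stays valid until kunmap(page).
 *	memcpy(buf, vaddr, PAGE_SIZE);
 *	kunmap(page);
 */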

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr:	The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While kmap_local_page() is significantly faster than kmap() for the highmem
 * case it comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(const struct page *page);
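
/*
 * Illustrative sketch (hypothetical caller code): the common single-page
 * pattern for kmap_local_page().  The mapping is only valid in this
 * context and must not be handed to other contexts:
 *
 *	void *vaddr = kmap_local_page(page);
 *
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_local(vaddr);
 */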

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_folio(folio1, offset1);
 *   addr2 = kmap_local_folio(folio2, offset2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(const struct folio *folio, size_t offset);
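
/*
 * Illustrative sketch (hypothetical caller code; assumes the access does
 * not cross a page boundary within the folio): map the page of a large
 * folio which contains byte @offset and read from it:
 *
 *	char *vaddr = kmap_local_folio(folio, offset);
 *
 *	memcpy(buf, vaddr, len);
 *	kunmap_local(vaddr);
 */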

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page:	Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache.  The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *   // Find the page of interest.
 *   struct page *page = find_get_page(mapping, offset);
 *
 *   // Gain access to the contents of that page.
 *   void *vaddr = kmap_atomic(page);
 *
 *   // Do something to the contents of that page.
 *   memset(vaddr, 0, PAGE_SIZE);
 *
 *   // Unmap that page.
 *   kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another you need to keep the kmap_atomic calls strictly nested, like:
 *
 * vaddr1 = kmap_atomic(page1);
 * vaddr2 = kmap_atomic(page2);
 *
 * memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 * kunmap_atomic(vaddr2);
 * kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(const struct page *page);
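
/*
 * Conversion sketch (hedged; whether the extra pagefault_disable() is
 * needed depends on why the old code used kmap_atomic()): most users can
 * switch directly to kmap_local_page()/kunmap_local().  Code which relied
 * on the implicit pagefault disabling has to do it explicitly:
 *
 *	pagefault_disable();
 *	vaddr = kmap_local_page(page);
 *	...
 *	kunmap_local(vaddr);
 *	pagefault_enable();
 */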

/* Highmem related interfaces for management code */
static inline unsigned long nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_local_page(page);
	clear_user_page(addr, vaddr, page);
	kunmap_local(addr);
}
#endif

#ifndef vma_alloc_zeroed_movable_folio
/**
 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
 * @vma: The VMA the page is to be allocated for.
 * @vaddr: The virtual address the page will be inserted into.
 *
 * This function will allocate a page suitable for inserting into this
 * VMA at this virtual address.  It may be allocated from highmem or
 * the movable zone.  An architecture may provide its own implementation.
 *
 * Return: A folio containing one allocated and zeroed page or NULL if
 * we are out of memory.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
					     unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
	if (folio && user_alloc_needs_zeroing())
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}
#endif

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_local_page(page);
	clear_page(kaddr);
	kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
	void *kaddr = kmap_local_page(page);

	clear_page(kasan_reset_tag(kaddr));
	kunmap_local(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
			unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
				      unsigned start1, unsigned end1,
				      unsigned start2, unsigned end2)
{
	void *kaddr = kmap_local_page(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_local(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
				     unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
				      unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);
}

#endif

#ifdef copy_mc_to_kernel
/*
 * If architecture supports machine check exception handling, define the
 * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
 * page with #MC in source page (@from) handled, and return the number
 * of bytes not copied if there was a #MC, otherwise 0 for success.
 */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
	kunmap_local(vto);
	kunmap_local(vfrom);

	if (ret)
		memory_failure_queue(page_to_pfn(from), 0);

	return ret;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	unsigned long ret;
	char *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	if (!ret)
		kmsan_copy_page_meta(to, from);
	kunmap_local(vto);
	kunmap_local(vfrom);

	if (ret)
		memory_failure_queue(page_to_pfn(from), 0);

	return ret;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
					unsigned long vaddr, struct vm_area_struct *vma)
{
	copy_user_highpage(to, from, vaddr, vma);
	return 0;
}

static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	copy_highpage(to, from);
	return 0;
}
#endif
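
/*
 * Usage sketch (hedged; the error code is caller policy, not mandated by
 * this header): a non-zero return means the source page triggered a #MC
 * and the destination is incomplete, so callers typically fail the
 * operation:
 *
 *	if (copy_mc_user_highpage(dst_page, src_page, addr, vma))
 *		return -EHWPOISON;
 */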

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memcpy_folio(struct folio *dst_folio, size_t dst_off,
				struct folio *src_folio, size_t src_off, size_t len)
{
	VM_BUG_ON(dst_off + len > folio_size(dst_folio));
	VM_BUG_ON(src_off + len > folio_size(src_folio));

	do {
		char *dst = kmap_local_folio(dst_folio, dst_off);
		const char *src = kmap_local_folio(src_folio, src_off);
		size_t chunk = len;

		if (folio_test_highmem(dst_folio) &&
		    chunk > PAGE_SIZE - offset_in_page(dst_off))
			chunk = PAGE_SIZE - offset_in_page(dst_off);
		if (folio_test_highmem(src_folio) &&
		    chunk > PAGE_SIZE - offset_in_page(src_off))
			chunk = PAGE_SIZE - offset_in_page(src_off);
		memcpy(dst, src, chunk);
		kunmap_local(src);
		kunmap_local(dst);

		dst_off += chunk;
		src_off += chunk;
		len -= chunk;
	} while (len > 0);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

/**
 * memcpy_from_folio - Copy a range of bytes from a folio.
 * @to: The memory to copy to.
 * @folio: The folio to read from.
 * @offset: The first byte in the folio to read.
 * @len: The number of bytes to copy.
 */
static inline void memcpy_from_folio(char *to, struct folio *folio,
				     size_t offset, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		const char *from = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_partial_kmap(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(from);

		to += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);
}

/**
 * memcpy_to_folio - Copy a range of bytes to a folio.
 * @folio: The folio to write to.
 * @offset: The first byte in the folio to store to.
 * @from: The memory to copy from.
 * @len: The number of bytes to copy.
 */
static inline void memcpy_to_folio(struct folio *folio, size_t offset,
				   const char *from, size_t len)
{
	VM_BUG_ON(offset + len > folio_size(folio));

	do {
		char *to = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_partial_kmap(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(to);

		from += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);

	flush_dcache_folio(folio);
}

/**
 * folio_zero_tail - Zero the tail of a folio.
 * @folio: The folio to zero.
 * @offset: The byte offset in the folio to start zeroing at.
 * @kaddr: The address the folio is currently mapped to.
 *
 * If you have already used kmap_local_folio() to map a folio, written
 * some data to it and now need to zero the end of the folio (and flush
 * the dcache), you can use this function.  If you do not have the
 * folio kmapped (eg the folio has been partially populated by DMA),
 * use folio_zero_range() or folio_zero_segment() instead.
 *
 * Return: An address which can be passed to kunmap_local().
 */
static inline __must_check void *folio_zero_tail(struct folio *folio,
						 size_t offset, void *kaddr)
{
	size_t len = folio_size(folio) - offset;

	if (folio_test_partial_kmap(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memset(kaddr, 0, max);
			kunmap_local(kaddr);
			len -= max;
			offset += max;
			max = PAGE_SIZE;
			kaddr = kmap_local_folio(folio, offset);
		}
	}

	memset(kaddr, 0, len);
	flush_dcache_folio(folio);

	return kaddr;
}
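
/*
 * Usage sketch (hypothetical caller code; assumes @len does not cross a
 * page boundary so the single kmap covers the copied bytes): copy some
 * data into an already-mapped folio and zero everything after it:
 *
 *	char *kaddr = kmap_local_folio(folio, 0);
 *
 *	memcpy(kaddr, data, len);
 *	kaddr = folio_zero_tail(folio, len, kaddr + len);
 *	kunmap_local(kaddr);
 */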

/**
 * folio_fill_tail - Copy some data to a folio and pad with zeroes.
 * @folio: The destination folio.
 * @offset: The offset into @folio at which to start copying.
 * @from: The data to copy.
 * @len: How many bytes of data to copy.
 *
 * This function is most useful for filesystems which support inline data.
 * When they want to copy data from the inode into the page cache, this
 * function does everything for them.  It supports large folios even on
 * HIGHMEM configurations.
 */
static inline void folio_fill_tail(struct folio *folio, size_t offset,
				   const char *from, size_t len)
{
	char *to = kmap_local_folio(folio, offset);

	VM_BUG_ON(offset + len > folio_size(folio));

	if (folio_test_partial_kmap(folio)) {
		size_t max = PAGE_SIZE - offset_in_page(offset);

		while (len > max) {
			memcpy(to, from, max);
			kunmap_local(to);
			len -= max;
			from += max;
			offset += max;
			max = PAGE_SIZE;
			to = kmap_local_folio(folio, offset);
		}
	}

	memcpy(to, from, len);
	to = folio_zero_tail(folio, offset + len, to + len);
	kunmap_local(to);
}
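
/*
 * Usage sketch (hedged; inode_inline_data() and inline_len are hypothetical
 * filesystem-specific names): reading an inline-data inode into the page
 * cache becomes a single call followed by marking the folio uptodate:
 *
 *	folio_fill_tail(folio, 0, inode_inline_data(inode), inline_len);
 *	folio_mark_uptodate(folio);
 */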

/**
 * memcpy_from_file_folio - Copy some bytes from a file folio.
 * @to: The destination buffer.
 * @folio: The folio to copy from.
 * @pos: The position in the file.
 * @len: The maximum number of bytes to copy.
 *
 * Copy up to @len bytes from this folio.  This may be limited by PAGE_SIZE
 * if the folio comes from HIGHMEM, and by the size of the folio.
 *
 * Return: The number of bytes copied from the folio.
 */
static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
					    loff_t pos, size_t len)
{
	size_t offset = offset_in_folio(folio, pos);
	char *from = kmap_local_folio(folio, offset);

	if (folio_test_partial_kmap(folio)) {
		offset = offset_in_page(offset);
		len = min_t(size_t, len, PAGE_SIZE - offset);
	} else
		len = min(len, folio_size(folio) - offset);

	memcpy(to, from, len);
	kunmap_local(from);

	return len;
}
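
/*
 * Usage sketch (hedged; assumes the requested range lies entirely within
 * the folio): because the copy may be truncated to one page on HIGHMEM,
 * callers needing @len bytes should loop on the return value:
 *
 *	while (len) {
 *		size_t n = memcpy_from_file_folio(buf, folio, pos, len);
 *
 *		buf += n;
 *		pos += n;
 *		len -= n;
 *	}
 */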

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
		size_t start1, size_t xend1, size_t start2, size_t xend2)
{
	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
		size_t start, size_t xend)
{
	zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
		size_t start, size_t length)
{
	zero_user_segments(&folio->page, start, start + length, 0, 0);
}
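
/*
 * Usage sketch (hedged; "isize" stands for the file size, e.g. from
 * i_size_read()): a common use is zeroing the part of a folio which lies
 * beyond end-of-file in a ->read_folio() implementation:
 *
 *	if (folio_pos(folio) + folio_size(folio) > isize)
 *		folio_zero_segment(folio, offset_in_folio(folio, isize),
 *				   folio_size(folio));
 */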

/**
 * folio_release_kmap - Unmap a folio and drop a refcount.
 * @folio: The folio to release.
 * @addr: The address previously returned by a call to kmap_local_folio().
 *
 * It is common, e.g. in directory handling, to kmap a folio.  This function
 * unmaps the folio and drops the refcount that was being held to keep the
 * folio alive while we accessed it.
 */
static inline void folio_release_kmap(struct folio *folio, void *addr)
{
	kunmap_local(addr);
	folio_put(folio);
}
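
/*
 * Usage sketch (hypothetical directory-walking code; assumes a folio
 * reference was taken when the folio was looked up and kmapped):
 *
 *	kaddr = kmap_local_folio(folio, 0);
 *	... walk the directory entries in kaddr ...
 *	folio_release_kmap(folio, kaddr);	// kunmap_local() + folio_put()
 */
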
#endif /* _LINUX_HIGHMEM_H */