/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif	/* CONFIG_SWAP */
/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
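
/*
 * Worked example, purely illustrative: on a 64-bit arch, BITS_PER_XA_VALUE
 * is 63 and MAX_SWAPFILES_SHIFT is 5, so
 *
 *	SWP_TYPE_SHIFT  == 63 - 5 == 58
 *	SWP_OFFSET_MASK == (1UL << 58) - 1
 *
 * i.e. `type' occupies bits 58..62 and `offset' bits 0..57, leaving the
 * top bit of the unsigned long free for the extra shmem/tmpfs shift.
 */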

/*
 * Definitions only for PFN swap entries (see is_pfn_swap_entry()).  To
 * store a PFN, we only need SWP_PFN_BITS bits.  Each of the pfn swap entries
 * can use the extra bits to store other information besides the PFN.
 */
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else  /* MAX_PHYSMEM_BITS */
#define SWP_PFN_BITS		min_t(int, \
				      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
				      SWP_TYPE_SHIFT)
#endif	/* MAX_PHYSMEM_BITS */
#define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)
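
/*
 * Worked example, again illustrative rather than normative: with 4-level
 * page tables on x86_64, MAX_PHYSMEM_BITS is 46 and PAGE_SHIFT is 12, so
 *
 *	SWP_PFN_BITS == 46 - 12 == 34
 *	SWP_PFN_MASK == BIT(34) - 1
 *
 * and a pfn swap entry has SWP_TYPE_SHIFT - 34 == 24 offset bits to spare
 * above the PFN.
 */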

/**
 * Migration swap entry specific bitfield definitions.  Layout:
 *
 *   |----------+--------------------|
 *   | swp_type | swp_offset         |
 *   |----------+--------+-+-+-------|
 *   |          | resv   |D|A|  PFN  |
 *   |----------+--------+-+-+-------|
 *
 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
 *
 * Note: A/D bits will be stored in migration entries iff there are enough
 * free bits in the arch-specific swp offset.  By default we'll ignore A/D
 * bits when migrating a page.  Please refer to migration_entry_supports_ad()
 * for more information.  If there are more bits besides PFN and A/D bits,
 * they should be reserved and always be zeros.
 */
#define SWP_MIG_YOUNG_BIT		(SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT		(SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS		(SWP_PFN_BITS + 2)

#define SWP_MIG_YOUNG			BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY			BIT(SWP_MIG_DIRTY_BIT)
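
/*
 * Continuing the illustrative x86_64 numbers above: the PFN sits in offset
 * bits 0..33, bit A (young) at offset bit 34, bit D (dirty) at offset bit
 * 35, so SWP_MIG_TOTAL_BITS == 36.  Whether the arch-specific swp offset
 * is actually wide enough for all of this is probed at swap init time; see
 * migration_entry_supports_ad() below.
 */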

static inline bool is_pfn_swap_entry(swp_entry_t entry);

/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format.
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format.
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
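
/*
 * A minimal round-trip sketch of the arch-independent encoding; the type
 * and offset values are arbitrary:
 *
 *	swp_entry_t entry = swp_entry(1, 0x1234);
 *
 * swp_type(entry) now returns 1 and swp_offset(entry) returns 0x1234.
 */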

/*
 * This should only be called upon a pfn swap entry to get the PFN stored
 * in the swap entry.  Please refer to is_pfn_swap_entry() for the
 * definition of a pfn swap entry.
 */
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{
	VM_BUG_ON(!is_pfn_swap_entry(entry));
	return swp_offset(entry) & SWP_PFN_MASK;
}

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
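
/*
 * Typical usage sketch (hypothetical caller, not a real fault path): a
 * non-present, non-none pte is first decoded into the arch-independent
 * form and only then classified:
 *
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *		if (non_swap_entry(entry))
 *			...	(migration, device, hwpoison, marker)
 *	}
 */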

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
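
/*
 * xa_mk_value() stores entry.val shifted up by one bit with the low bit
 * set to tag it as an xarray value entry; this is the "further one bit"
 * the layout comment above reserves for shmem/tmpfs.  The conversion
 * round-trips losslessly:
 *
 *	void *cookie = swp_to_radix_entry(entry);
 *
 * radix_to_swp_entry(cookie).val == entry.val.
 */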

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);

	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE;
}

#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

#endif /* CONFIG_DEVICE_PRIVATE */
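
/*
 * Usage sketch, loosely modelled on the do_swap_page() handling rather
 * than a verbatim excerpt: a CPU fault on a device private entry is handed
 * back to the driver that owns the device memory:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_device_private_entry(entry))
 *		ret = pfn_swap_entry_to_page(entry)->pgmap->ops->
 *				migrate_to_ram(vmf);
 */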

#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}

/*
 * Returns whether the host has a large enough swap offset field to support
 * carrying over pgtable A/D bits for page migrations.  The result is
 * pretty much arch-specific.
 */
static inline bool migration_entry_supports_ad(void)
{
#ifdef CONFIG_SWAP
	return swap_migration_ad_supported;
#else  /* CONFIG_SWAP */
	return false;
#endif	/* CONFIG_SWAP */
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_YOUNG);
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_YOUNG;
	/* Keep the old behavior of aging the page after migration */
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_DIRTY;
	/* Keep the old behavior of a clean page after migration */
	return false;
}
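
/*
 * Illustrative composition, assuming migration_entry_supports_ad() returns
 * true (otherwise the make_*() calls are no-ops and the is_*() getters
 * conservatively return false):
 *
 *	entry = make_writable_migration_entry(pfn);
 *	entry = make_migration_entry_young(entry);
 *	entry = make_migration_entry_dirty(entry);
 *
 * is_migration_entry_young() and is_migration_entry_dirty() now both
 * return true for the entry, while swp_offset_pfn(entry) still yields pfn.
 */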

extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
#else  /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	return false;
}
#endif	/* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_FAILURE

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}
#endif

typedef unsigned long pte_marker;

#define  PTE_MARKER_UFFD_WP			BIT(0)
/*
 * "Poisoned" here is meant in the very general sense of "future accesses
 * are invalid", instead of referring very specifically to hardware memory
 * errors.  This marker is meant to represent any of various different
 * causes of this.
 *
 * Note that, when encountered by the faulting logic, PTEs with this marker
 * will result in VM_FAULT_HWPOISON and thus unconditionally trigger the
 * hardware memory error logic.
 */
#define  PTE_MARKER_POISONED			BIT(1)
/*
 * Indicates that, on fault, this PTE will cause a SIGSEGV signal to be
 * sent.  This means guard markers behave in effect as if the region were
 * mapped PROT_NONE, rather than as if it were a memory hole or equivalent.
 */
#define  PTE_MARKER_GUARD			BIT(2)
#define  PTE_MARKER_MASK			(BIT(3) - 1)

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_PTE_MARKER;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return swp_offset(entry) & PTE_MARKER_MASK;
}

static inline bool is_pte_marker(pte_t pte)
{
	return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
}

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}
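
/*
 * A minimal sketch of the marker round trip:
 *
 *	pte_t pte = make_pte_marker(PTE_MARKER_UFFD_WP);
 *
 * is_pte_marker(pte) is then true, and
 * pte_marker_get(pte_to_swp_entry(pte)) returns PTE_MARKER_UFFD_WP.
 */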

static inline swp_entry_t make_poisoned_swp_entry(void)
{
	return make_pte_marker_entry(PTE_MARKER_POISONED);
}

static inline int is_poisoned_swp_entry(swp_entry_t entry)
{
	return is_pte_marker_entry(entry) &&
	       (pte_marker_get(entry) & PTE_MARKER_POISONED);
}

static inline swp_entry_t make_guard_swp_entry(void)
{
	return make_pte_marker_entry(PTE_MARKER_GUARD);
}

static inline int is_guard_swp_entry(swp_entry_t entry)
{
	return is_pte_marker_entry(entry) &&
	       (pte_marker_get(entry) & PTE_MARKER_GUARD);
}

/*
 * This is a special version of pte_none() that also covers the case where
 * the pte is a pte marker.  It exists because in many cases a pte marker
 * should be treated as a none pte; we have merely stored some information
 * in what used to be a none pte, so it is no longer none.
 *
 * It should be used when the pte is file-backed, ram-based and backing
 * userspace pages, like shmem.  It is not needed upon pgtables that do not
 * support pte markers at all.  For example, it's not needed on anonymous
 * memory, kernel-only memory (including during boot), or non-ram based
 * generic file-systems.  It's fine to be used even there, but the extra
 * pte marker check will be pure overhead.
 */
static inline int pte_none_mostly(pte_t pte)
{
	return pte_none(pte) || is_pte_marker(pte);
}

static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}

static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
{
	struct folio *folio = pfn_folio(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding folio is locked
	 */
	BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));

	return folio;
}

/*
 * A pfn swap entry is a special type of swap entry that always has a pfn
 * stored in the swap offset.  They can either be used to represent
 * unaddressable device memory, to restrict access to a page undergoing
 * migration or to represent a pfn which has been hwpoisoned and unmapped.
 */
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
	/* Make sure the swp offset can always store the needed fields */
	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);

	return is_migration_entry(entry) || is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
}
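
/*
 * Usage sketch: callers should gate on is_pfn_swap_entry() before asking
 * for the page or folio, since other entry types keep no PFN in their
 * offset field (the VM_BUG_ON in swp_offset_pfn() enforces this):
 *
 *	if (is_pfn_swap_entry(entry)) {
 *		struct folio *folio = pfn_swap_entry_folio(entry);
 *		...
 *	}
 */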

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else  /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif  /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
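
/*
 * All the special entry types above (migration, device private/exclusive,
 * hwpoison, pte markers) are allocated type values at or beyond
 * MAX_SWAPFILES, so the single comparison in non_swap_entry() separates
 * them from entries that reference a real swap device.  Illustrative use:
 *
 *	if (!non_swap_entry(entry))
 *		si = swp_swap_info(entry);	(a real swap device)
 */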

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */