/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_SWAP_TABLE_H
#define _MM_SWAP_TABLE_H

#include <linux/rcupdate.h>
#include <linux/atomic.h>
#include "swap.h"

/* A typical flat array in each cluster, used as the swap table */
struct swap_table {
	atomic_long_t entries[SWAPFILE_CLUSTER];
};

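/* True if the swap table of one cluster is exactly one page in size */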
#define SWP_TABLE_USE_PAGE (sizeof(struct swap_table) == PAGE_SIZE)

/*
 * A swap table entry represents the status of a swap slot on a swap
 * (physical or virtual) device. The swap table in each cluster is a
 * 1:1 map of the swap slots in this cluster.
 *
 * Each swap table entry could be a pointer (folio), an XA_VALUE
 * (shadow), or NULL.
 */
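
/*
 * The three cases can be told apart from the entry value alone: 0 means
 * the slot holds nothing, a value with the XA_VALUE bit set is a shadow,
 * and anything else is a folio pointer (folio pointers are at least
 * word aligned, so they can never look like an XA_VALUE). The type
 * checking helpers below rely on exactly this.
 */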

/*
 * Helpers for casting one type of info into a swap table entry.
 */
static inline unsigned long null_to_swp_tb(void)
{
	BUILD_BUG_ON(sizeof(unsigned long) != sizeof(atomic_long_t));
	return 0;
}

static inline unsigned long folio_to_swp_tb(struct folio *folio)
{
	BUILD_BUG_ON(sizeof(unsigned long) != sizeof(void *));
	return (unsigned long)folio;
}

static inline unsigned long shadow_swp_to_tb(void *shadow)
{
	BUILD_BUG_ON((BITS_PER_XA_VALUE + 1) !=
		     BITS_PER_BYTE * sizeof(unsigned long));
	VM_WARN_ON_ONCE(shadow && !xa_is_value(shadow));
	return (unsigned long)shadow;
}
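
/*
 * Note: the BUILD_BUG_ON()s above are what guarantee that a single
 * unsigned long table entry can hold any of the three representations
 * (atomic_long_t, folio pointer, XA_VALUE shadow) without loss.
 */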

/*
 * Helpers for swap table entry type checking.
 */
static inline bool swp_tb_is_null(unsigned long swp_tb)
{
	return !swp_tb;
}

static inline bool swp_tb_is_folio(unsigned long swp_tb)
{
	return !xa_is_value((void *)swp_tb) && !swp_tb_is_null(swp_tb);
}

static inline bool swp_tb_is_shadow(unsigned long swp_tb)
{
	return xa_is_value((void *)swp_tb);
}

/*
 * Helpers for retrieving info from swap table.
 */
static inline struct folio *swp_tb_to_folio(unsigned long swp_tb)
{
	VM_WARN_ON(!swp_tb_is_folio(swp_tb));
	return (void *)swp_tb;
}

static inline void *swp_tb_to_shadow(unsigned long swp_tb)
{
	VM_WARN_ON(!swp_tb_is_shadow(swp_tb));
	return (void *)swp_tb;
}
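
/*
 * Illustrative sketch (not a helper defined here) of pairing the type
 * checks with the conversion helpers after reading an entry through one
 * of the accessors below:
 *
 *	swp_tb = __swap_table_get(ci, off);
 *	if (swp_tb_is_folio(swp_tb))
 *		folio = swp_tb_to_folio(swp_tb);
 *	else if (swp_tb_is_shadow(swp_tb))
 *		shadow = swp_tb_to_shadow(swp_tb);
 *	else
 *		VM_WARN_ON(!swp_tb_is_null(swp_tb));
 */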

/*
 * Helpers for accessing or modifying the swap table of a cluster;
 * the swap cluster must be locked.
 */
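/*
 * Illustrative caller pattern (a sketch only, assuming direct use of
 * ci->lock; real callers typically take the cluster lock through higher
 * level cluster locking helpers):
 *
 *	spin_lock(&ci->lock);
 *	old = __swap_table_xchg(ci, off, folio_to_swp_tb(folio));
 *	spin_unlock(&ci->lock);
 */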
static inline void __swap_table_set(struct swap_cluster_info *ci,
				    unsigned int off, unsigned long swp_tb)
{
	atomic_long_t *table = rcu_dereference_protected(ci->table, true);

	lockdep_assert_held(&ci->lock);
	VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);
	atomic_long_set(&table[off], swp_tb);
}

static inline unsigned long __swap_table_xchg(struct swap_cluster_info *ci,
					      unsigned int off, unsigned long swp_tb)
{
	atomic_long_t *table = rcu_dereference_protected(ci->table, true);

	lockdep_assert_held(&ci->lock);
	VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);
	/* Ordering is guaranteed by cluster lock, relax */
	return atomic_long_xchg_relaxed(&table[off], swp_tb);
}

static inline unsigned long __swap_table_get(struct swap_cluster_info *ci,
					     unsigned int off)
{
	atomic_long_t *table;

	VM_WARN_ON_ONCE(off >= SWAPFILE_CLUSTER);
	table = rcu_dereference_check(ci->table, lockdep_is_held(&ci->lock));

	return atomic_long_read(&table[off]);
}

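/*
 * Lockless variant: only an RCU read lock is taken internally, so the
 * table may already be gone (a null entry is returned in that case) and
 * the value read may be stale by the time the caller looks at it.
 */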
static inline unsigned long swap_table_get(struct swap_cluster_info *ci,
					   unsigned int off)
{
	atomic_long_t *table;
	unsigned long swp_tb;

	rcu_read_lock();
	table = rcu_dereference(ci->table);
	swp_tb = table ? atomic_long_read(&table[off]) : null_to_swp_tb();
	rcu_read_unlock();

	return swp_tb;
}
#endif