/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

unsigned long invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
int filemap_invalidate_pages(struct address_space *mapping,
		loff_t pos, loff_t end, bool nowait);

int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte);
int filemap_invalidate_inode(struct inode *inode, bool flush,
			     loff_t start, loff_t end);

static inline int filemap_fdatawait(struct address_space *mapping)
{
	return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
	/* Fastpath for common case of no error */
	if (unlikely(err))
		__filemap_set_wb_err(mapping, err);
}

/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
					errseq_t since)
{
	return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past. This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
	return errseq_sample(&mapping->wb_err);
}
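
/*
 * Illustrative sketch (not part of the upstream header): one way a caller
 * can pair the sample and check helpers so that a writeback error raised
 * between the two calls is reported exactly once:
 *
 *	errseq_t since = filemap_sample_wb_err(mapping);
 *	int err = filemap_write_and_wait(mapping);
 *
 *	if (!err)
 *		err = filemap_check_wb_err(mapping, since);
 */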

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
	return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}

/*
 * Flush file data before changing attributes.  Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
	inode_dio_wait(inode);
	return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(const struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state. That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages. This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(const struct address_space *mapping)
{
	void *head;

	/*
	 * On highmem systems, there could be lowmem pressure from the
	 * inodes before there is highmem pressure from the page
	 * cache. Make inodes shrinkable regardless of cache state.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Cache completely empty? Shrink away. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/*
	 * The xarray stores single offset-0 entries directly in the
	 * head pointer, which allows non-resident page cache entries
	 * to escape the shadow shrinker's list of xarray nodes. The
	 * inode shrinker needs to pick them up under memory pressure.
	 */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_RELEASE_ALWAYS = 6,	/* Call ->release_folio(), even if no private data */
	AS_STABLE_WRITES = 7,	/* must wait for writeback before modifying
				   folio contents */
	AS_INACCESSIBLE = 8,	/* Do not attempt direct R/W access to the mapping */
	AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM = 9,
	AS_KERNEL_FILE = 10,	/* mapping for a fake kernel file that shouldn't
				   account usage to user cgroups */
	/* Bits 16-25 are used for FOLIO_ORDER */
	AS_FOLIO_ORDER_BITS = 5,
	AS_FOLIO_ORDER_MIN = 16,
	AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
};

#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)
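
/*
 * Worked example (illustrative): with AS_FOLIO_ORDER_BITS == 5, the minimum
 * order occupies bits 16-20 of mapping->flags and the maximum order bits
 * 21-25.  Encoding min = 2, max = 9 amounts to:
 *
 *	flags |= (2 << AS_FOLIO_ORDER_MIN) | (9 << AS_FOLIO_ORDER_MAX);
 *
 * and the orders are recovered by masking and shifting, as done in
 * mapping_min_folio_order() and mapping_max_folio_order() below.
 */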

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
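
/*
 * Illustrative sketch: a filesystem's writeback completion path would
 * typically funnel any I/O error through mapping_set_error() before ending
 * writeback on the folio, along these lines:
 *
 *	int err = blk_status_to_errno(bio->bi_status);
 *
 *	if (err)
 *		mapping_set_error(folio->mapping, err);
 *	folio_end_writeback(folio);
 */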

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(const struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(const struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(const struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline bool mapping_release_always(const struct address_space *mapping)
{
	return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_set_release_always(struct address_space *mapping)
{
	set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_clear_release_always(struct address_space *mapping)
{
	clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline bool mapping_stable_writes(const struct address_space *mapping)
{
	return test_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_stable_writes(struct address_space *mapping)
{
	set_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_clear_stable_writes(struct address_space *mapping)
{
	clear_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_inaccessible(struct address_space *mapping)
{
	/*
	 * It's expected inaccessible mappings are also unevictable. Compaction
	 * migrate scanner (isolate_migratepages_block()) relies on this to
	 * reduce page locking.
	 */
	set_bit(AS_UNEVICTABLE, &mapping->flags);
	set_bit(AS_INACCESSIBLE, &mapping->flags);
}

static inline bool mapping_inaccessible(const struct address_space *mapping)
{
	return test_bit(AS_INACCESSIBLE, &mapping->flags);
}

static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
{
	set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}

static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping)
{
	return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

/*
 * There are some parts of the kernel which assume that PMD entries
 * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
 * limit the maximum allocation order to PMD size.  I'm not aware of any
 * assumptions about maximum order if THP are disabled, but 8 seems like
 * a good order (that's 1MB if you're using 4kB pages).
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PREFERRED_MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
#else
#define PREFERRED_MAX_PAGECACHE_ORDER	8
#endif

/*
 * xas_split_alloc() does not support arbitrary orders. This implies no
 * 512MB THP on ARM64 with 64KB base page size.
 */
#define MAX_XAS_ORDER		(XA_CHUNK_SHIFT * 2 - 1)
#define MAX_PAGECACHE_ORDER	min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)

/*
 * mapping_max_folio_size_supported() - Check the max folio size supported
 *
 * The filesystem should call this function at mount time if there is a
 * requirement on the folio mapping size in the page cache.
 */
static inline size_t mapping_max_folio_size_supported(void)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
	return PAGE_SIZE;
}
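
/*
 * Illustrative sketch: a filesystem whose block size may exceed PAGE_SIZE
 * might validate that constraint at mount time:
 *
 *	if (sb->s_blocksize > mapping_max_folio_size_supported())
 *		return -EINVAL;
 */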

/*
 * mapping_set_folio_order_range() - Set the orders supported by a file.
 * @mapping: The address space of the file.
 * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
 * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
 *
 * The filesystem should call this function in its inode constructor to
 * indicate which base size (min) and maximum size (max) of folio the VFS
 * can use to cache the contents of the file.  This should only be used
 * if the filesystem needs special handling of folio sizes (ie there is
 * something the core cannot know).
 * Do not tune it based on, eg, i_size.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_folio_order_range(struct address_space *mapping,
						 unsigned int min,
						 unsigned int max)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	if (min > MAX_PAGECACHE_ORDER)
		min = MAX_PAGECACHE_ORDER;

	if (max > MAX_PAGECACHE_ORDER)
		max = MAX_PAGECACHE_ORDER;

	if (max < min)
		max = min;

	mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
		(min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
}
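
/*
 * Illustrative sketch: a filesystem with a 16KiB minimum I/O granularity on
 * a 4KiB-page system could pin the supported orders in its inode
 * constructor:
 *
 *	mapping_set_folio_order_range(inode->i_mapping, 2,
 *				      MAX_PAGECACHE_ORDER);
 *
 * after which every folio in this mapping is at least order-2 (16KiB).
 */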

static inline void mapping_set_folio_min_order(struct address_space *mapping,
					       unsigned int min)
{
	mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
}

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The address space of the file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
	mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
}

static inline unsigned int
mapping_max_folio_order(const struct address_space *mapping)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return 0;
	return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
}

static inline unsigned int
mapping_min_folio_order(const struct address_space *mapping)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return 0;
	return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
}

static inline unsigned long
mapping_min_folio_nrpages(const struct address_space *mapping)
{
	return 1UL << mapping_min_folio_order(mapping);
}

static inline unsigned long
mapping_min_folio_nrbytes(const struct address_space *mapping)
{
	return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
}

/**
 * mapping_align_index() - Align index for this mapping.
 * @mapping: The address_space.
 * @index: The page index.
 *
 * The index of a folio must be naturally aligned.  If you are adding a
 * new folio to the page cache and need to know what index to give it,
 * call this function.
 */
static inline pgoff_t mapping_align_index(const struct address_space *mapping,
					  pgoff_t index)
{
	return round_down(index, mapping_min_folio_nrpages(mapping));
}
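
/*
 * Worked example (illustrative): with a minimum folio order of 2 (4 pages),
 * mapping_align_index(mapping, 7) rounds the index down to 4, so a newly
 * added folio covers indices 4-7 and stays naturally aligned.
 */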

/*
 * Large folio support currently depends on THP.  These dependencies are
 * being worked on but are not yet fixed.
 */
static inline bool mapping_large_folio_support(const struct address_space *mapping)
{
	/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
	VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
		     "Anonymous mapping always supports large folio");

	return mapping_max_folio_order(mapping) > 0;
}

/* Return the maximum folio size for this pagecache mapping, in bytes. */
static inline size_t mapping_max_folio_size(const struct address_space *mapping)
{
	return PAGE_SIZE << mapping_max_folio_order(mapping);
}

static inline int filemap_nr_thps(const struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *folio_mapping(const struct folio *folio);

/**
 * folio_flush_mapping - Find the file mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Anonymous folios return NULL, even if they're in
 * the swap cache.  Other kinds of folio also return NULL.
 *
 * This is ONLY used by architecture cache flushing code.  If you aren't
 * writing cache flushing code, you want either folio_mapping() or
 * folio_file_mapping().
 */
static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return NULL;

	return folio_mapping(folio);
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
	return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data.  The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
	void *old = folio_get_private(folio);

	folio->private = data;
	return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
	return folio_alloc_noprof(gfp, order);
}
#endif

#define filemap_alloc_folio(...)				\
	alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return &filemap_alloc_folio(gfp, 0)->page;
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

/**
 * typedef fgf_t - Flags for getting folios from the page cache.
 *
 * Most users of the page cache will not need to use these flags;
 * there are convenience functions such as filemap_get_folio() and
 * filemap_lock_folio().  For users which need more control over exactly
 * what is done with the folios, these flags to __filemap_get_folio()
 * are available.
 *
 * * %FGP_ACCESSED - The folio will be marked accessed.
 * * %FGP_LOCK - The folio is returned locked.
 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
 *   added to the page cache and the VM's LRU list.  The folio is
 *   returned locked.
 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
 *   folio is already in cache.  If the folio was allocated, unlock it
 *   before returning so the caller can do the same dance.
 * * %FGP_WRITE - The folio will be written to by the caller.
 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
 * * %FGP_NOWAIT - Don't block on the folio lock.
 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
 * * %FGP_DONTCACHE - Uncached buffered IO
 * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
 *   implementation.
 */
typedef unsigned int __bitwise fgf_t;

#define FGP_ACCESSED		((__force fgf_t)0x00000001)
#define FGP_LOCK		((__force fgf_t)0x00000002)
#define FGP_CREAT		((__force fgf_t)0x00000004)
#define FGP_WRITE		((__force fgf_t)0x00000008)
#define FGP_NOFS		((__force fgf_t)0x00000010)
#define FGP_NOWAIT		((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP		((__force fgf_t)0x00000040)
#define FGP_STABLE		((__force fgf_t)0x00000080)
#define FGP_DONTCACHE		((__force fgf_t)0x00000100)
#define FGF_GET_ORDER(fgf)	(((__force unsigned)fgf) >> 26)	/* top 6 bits */

#define FGP_WRITEBEGIN		(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)

static inline unsigned int filemap_get_order(size_t size)
{
	unsigned int shift = ilog2(size);

	if (shift <= PAGE_SHIFT)
		return 0;

	return shift - PAGE_SHIFT;
}

/**
 * fgf_set_order - Encode a length in the fgf_t flags.
 * @size: The suggested size of the folio to create.
 *
 * The caller of __filemap_get_folio() can use this to suggest a preferred
 * size for the folio that is created.  If there is already a folio at
 * the index, it will be returned, no matter what its size.  If a folio
 * is freshly created, it may be of a different size than requested
 * due to alignment constraints, memory pressure, or the presence of
 * other folios at nearby indices.
 */
static inline fgf_t fgf_set_order(size_t size)
{
	unsigned int order = filemap_get_order(size);

	if (!order)
		return 0;
	return (__force fgf_t)(order << 26);
}

void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);

/**
 * write_begin_get_folio - Get folio for write_begin with flags.
 * @iocb: The kiocb passed from write_begin (may be NULL).
 * @mapping: The address space to search.
 * @index: The page cache index.
 * @len: Length of data being written.
 *
 * This is a helper for filesystem write_begin() implementations.
 * It wraps __filemap_get_folio(), setting appropriate flags in
 * the write begin context.
 *
 * Return: A folio or an ERR_PTR.
 */
static inline struct folio *write_begin_get_folio(const struct kiocb *iocb,
		struct address_space *mapping, pgoff_t index, size_t len)
{
	fgf_t fgp_flags = FGP_WRITEBEGIN;

	fgp_flags |= fgf_set_order(len);

	if (iocb && iocb->ki_flags & IOCB_DONTCACHE)
		fgp_flags |= FGP_DONTCACHE;

	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}
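
/*
 * Illustrative sketch: a write_begin() implementation might fetch its folio
 * like this, with pos being the file position passed in by the caller:
 *
 *	struct folio *folio = write_begin_get_folio(iocb, mapping,
 *						    pos >> PAGE_SHIFT, len);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 */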

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned with an increased refcount.
 *
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, 0, 0);
}

/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * filemap_grab_folio - grab a folio from the page cache
 * @mapping: The address space to search
 * @index: The page index
 *
 * Looks up the page cache entry at @mapping & @index. If no folio is found,
 * a new folio is created. The folio is locked, marked as accessed, and
 * returned.
 *
 * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found
 * and failed to create a folio.
 */
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(mapping));
}
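
/*
 * Illustrative sketch: these convenience wrappers return ERR_PTR() rather
 * than NULL, so a typical lookup checks with IS_ERR():
 *
 *	struct folio *folio = filemap_lock_folio(mapping, index);
 *
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	... operate on the locked folio ...
 *	folio_unlock(folio);
 *	folio_put(folio);
 */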

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, fgf_t fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Returns a locked page at the given index in the given cache, creating it
 * if needed, but does not wait if the page is already locked or if memory
 * would need to be reclaimed.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(const struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}

/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the folio locked and ensure
 * e.g., shmem did not move this folio to the swap cache.
 * Return: true or false.
 */
static inline bool folio_contains(const struct folio *folio, pgoff_t index)
{
	VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
	return index - folio->index < folio_nr_pages(folio);
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
		gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_folio(mapping, index, NULL, file);
}

/**
 * page_pgoff - Calculate the logical page offset of this page.
 * @folio: The folio containing this page.
 * @page: The page which we need the offset of.
 *
 * For file pages, this is the offset from the beginning of the file
 * in units of PAGE_SIZE.  For anonymous pages, this is the offset from
 * the beginning of the anon_vma in units of PAGE_SIZE.  This will
 * return nonsense for KSM pages.
 *
 * Context: Caller must have a reference on the folio or otherwise
 * prevent it from being split or freed.
 *
 * Return: The offset in units of PAGE_SIZE.
 */
static inline pgoff_t page_pgoff(const struct folio *folio,
		const struct page *page)
{
	return folio->index + folio_page_idx(folio, page);
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(const struct folio *folio)
{
	return ((loff_t)folio->index) * PAGE_SIZE;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_pos(folio) + folio_page_idx(folio, page) * PAGE_SIZE;
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb folios).
 */
static inline pgoff_t folio_pgoff(const struct folio *folio)
{
	return folio->index;
}

static inline pgoff_t linear_page_index(const struct vm_area_struct *vma,
					const unsigned long address)
{
	pgoff_t pgoff;
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

struct wait_page_key {
	struct folio *folio;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct folio *folio;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				   struct wait_page_key *key)
{
	if (wait_page->folio != key->folio)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order).  Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline bool trylock_page(struct page *page)
{
	return folio_trylock(page_folio(page));
}

/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock protects against many things, probably more than it
 * should.  It is primarily held while a folio is being brought uptodate,
 * either from its backing file or from swap.  It is also held while a
 * folio is being truncated from its address_space, so holding the lock
 * is sufficient to keep folio->mapping stable.
 *
 * The folio lock is also held while write() is modifying the page to
 * provide POSIX atomicity guarantees (as long as the write does not
 * cross a page boundary).  Other modifications to the data in the folio
 * do not hold the folio lock and can race with writes, eg DMA and stores
 * to mapped pages.
 *
 * Context: May sleep.  If you need to acquire the locks of two or
 * more folios, they must be in order of ascending index, if they are
 * in the same address_space.  If they are in different address_spaces,
 * acquire the lock of the folio which belongs to the address_space which
 * has the lowest address in memory first.
 */
static inline void folio_lock(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		__folio_lock(folio);
}
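
/*
 * Illustrative sketch: per the ordering rule above, locking two folios from
 * the same mapping must proceed in ascending index order:
 *
 *	if (folio_a->index > folio_b->index)
 *		swap(folio_a, folio_b);
 *	folio_lock(folio_a);
 *	folio_lock(folio_b);
 */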
|---|
| 1143 |  | 
|---|
| 1144 | /** | 
|---|
| 1145 | * lock_page() - Lock the folio containing this page. | 
|---|
| 1146 | * @page: The page to lock. | 
|---|
| 1147 | * | 
|---|
| 1148 | * See folio_lock() for a description of what the lock protects. | 
|---|
| 1149 | * This is a legacy function and new code should probably use folio_lock() | 
|---|
| 1150 | * instead. | 
|---|
| 1151 | * | 
|---|
| 1152 | * Context: May sleep.  Pages in the same folio share a lock, so do not | 
|---|
| 1153 | * attempt to lock two pages which share a folio. | 
|---|
| 1154 | */ | 
|---|
| 1155 | static inline void lock_page(struct page *page) | 
|---|
| 1156 | { | 
|---|
| 1157 | struct folio *folio; | 
|---|
| 1158 | might_sleep(); | 
|---|
| 1159 |  | 
|---|
| 1160 | folio = page_folio(page); | 
|---|
| 1161 | if (!folio_trylock(folio)) | 
|---|
| 1162 | __folio_lock(folio); | 
|---|
| 1163 | } | 
|---|
| 1164 |  | 
|---|
| 1165 | /** | 
|---|
| 1166 | * folio_lock_killable() - Lock this folio, interruptible by a fatal signal. | 
|---|
| 1167 | * @folio: The folio to lock. | 
|---|
| 1168 | * | 
|---|
| 1169 | * Attempts to lock the folio, like folio_lock(), except that the sleep | 
|---|
| 1170 | * to acquire the lock is interruptible by a fatal signal. | 
|---|
| 1171 | * | 
|---|
| 1172 | * Context: May sleep; see folio_lock(). | 
|---|
| 1173 | * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received. | 
|---|
| 1174 | */ | 
|---|
| 1175 | static inline int folio_lock_killable(struct folio *folio) | 
|---|
| 1176 | { | 
|---|
| 1177 | might_sleep(); | 
|---|
| 1178 | if (!folio_trylock(folio)) | 
|---|
| 1179 | return __folio_lock_killable(folio); | 
|---|
| 1180 | return 0; | 
|---|
| 1181 | } | 
|---|
| 1182 |  | 
|---|
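|  | /* |
|---|
|  |  * Example (editor's sketch): a caller must back out cleanly when |
|---|
|  |  * folio_lock_killable() returns -EINTR: |
|---|
|  |  * |
|---|
|  |  *	err = folio_lock_killable(folio); |
|---|
|  |  *	if (err) { |
|---|
|  |  *		folio_put(folio); |
|---|
|  |  *		return err;	// a fatal signal is pending |
|---|
|  |  *	} |
|---|
|  |  */ |
|---|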
| 1183 | /* | 
|---|
| 1184 | * folio_lock_or_retry - Lock the folio, unless this would block and the | 
|---|
| 1185 | * caller indicated that it can handle a retry. | 
|---|
| 1186 | * | 
|---|
| 1187 | * Return value and mmap_lock implications depend on flags; see | 
|---|
| 1188 | * __folio_lock_or_retry(). | 
|---|
| 1189 | */ | 
|---|
| 1190 | static inline vm_fault_t folio_lock_or_retry(struct folio *folio, | 
|---|
| 1191 | struct vm_fault *vmf) | 
|---|
| 1192 | { | 
|---|
| 1193 | might_sleep(); | 
|---|
| 1194 | if (!folio_trylock(folio)) | 
|---|
| 1195 | return __folio_lock_or_retry(folio, vmf); | 
|---|
| 1196 | return 0; | 
|---|
| 1197 | } | 
|---|
| 1198 |  | 
|---|
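|  | /* |
|---|
|  |  * Example (editor's sketch): in a fault path, a non-zero return means the |
|---|
|  |  * lock was not taken and, depending on vmf->flags, the mmap_lock may |
|---|
|  |  * already have been released: |
|---|
|  |  * |
|---|
|  |  *	ret = folio_lock_or_retry(folio, vmf); |
|---|
|  |  *	if (ret) { |
|---|
|  |  *		folio_put(folio); |
|---|
|  |  *		return ret;	// commonly VM_FAULT_RETRY |
|---|
|  |  *	} |
|---|
|  |  */ |
|---|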
| 1199 | /* | 
|---|
| 1200 | * This is exported only for folio_wait_locked/folio_wait_writeback, etc., | 
|---|
| 1201 | * and should not be used directly. | 
|---|
| 1202 | */ | 
|---|
| 1203 | void folio_wait_bit(struct folio *folio, int bit_nr); | 
|---|
| 1204 | int folio_wait_bit_killable(struct folio *folio, int bit_nr); | 
|---|
| 1205 |  | 
|---|
| 1206 | /* | 
|---|
| 1207 | * Wait for a folio to be unlocked. | 
|---|
| 1208 | * | 
|---|
| 1209 | * This must be called with the caller "holding" the folio, | 
|---|
| 1210 | * ie with increased folio reference count so that the folio won't | 
|---|
| 1211 | * go away during the wait. | 
|---|
| 1212 | */ | 
|---|
| 1213 | static inline void folio_wait_locked(struct folio *folio) | 
|---|
| 1214 | { | 
|---|
| 1215 | if (folio_test_locked(folio)) | 
|---|
| 1216 | folio_wait_bit(folio, PG_locked); | 
|---|
| 1217 | } | 
|---|
| 1218 |  | 
|---|
| 1219 | static inline int folio_wait_locked_killable(struct folio *folio) | 
|---|
| 1220 | { | 
|---|
| 1221 | if (!folio_test_locked(folio)) | 
|---|
| 1222 | return 0; | 
|---|
| 1223 | return folio_wait_bit_killable(folio, PG_locked); | 
|---|
| 1224 | } | 
|---|
| 1225 |  | 
|---|
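|  | /* |
|---|
|  |  * Example (editor's sketch): hold a reference across the wait, as the |
|---|
|  |  * comment above requires, so the folio cannot be freed while we sleep: |
|---|
|  |  * |
|---|
|  |  *	folio_get(folio); |
|---|
|  |  *	folio_wait_locked(folio); |
|---|
|  |  *	// the lock was released at least once; we do not hold it ourselves |
|---|
|  |  *	folio_put(folio); |
|---|
|  |  */ |
|---|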
| 1226 | void folio_end_read(struct folio *folio, bool success); | 
|---|
| 1227 | void wait_on_page_writeback(struct page *page); | 
|---|
| 1228 | void folio_wait_writeback(struct folio *folio); | 
|---|
| 1229 | int folio_wait_writeback_killable(struct folio *folio); | 
|---|
| 1230 | void end_page_writeback(struct page *page); | 
|---|
| 1231 | void folio_end_writeback(struct folio *folio); | 
|---|
| 1232 | void folio_end_writeback_no_dropbehind(struct folio *folio); | 
|---|
| 1233 | void folio_end_dropbehind(struct folio *folio); | 
|---|
| 1234 | void folio_wait_stable(struct folio *folio); | 
|---|
| 1235 | void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn); | 
|---|
| 1236 | void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb); | 
|---|
| 1237 | void __folio_cancel_dirty(struct folio *folio); | 
|---|
| 1238 | static inline void folio_cancel_dirty(struct folio *folio) | 
|---|
| 1239 | { | 
|---|
| 1240 | /* Avoid atomic ops, locking, etc. when not actually needed. */ | 
|---|
| 1241 | if (folio_test_dirty(folio)) | 
|---|
| 1242 | __folio_cancel_dirty(folio); | 
|---|
| 1243 | } | 
|---|
| 1244 | bool folio_clear_dirty_for_io(struct folio *folio); | 
|---|
| 1245 | bool clear_page_dirty_for_io(struct page *page); | 
|---|
| 1246 | void folio_invalidate(struct folio *folio, size_t offset, size_t length); | 
|---|
| 1247 | bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); | 
|---|
| 1248 |  | 
|---|
| 1249 | #ifdef CONFIG_MIGRATION | 
|---|
| 1250 | int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, | 
|---|
| 1251 | struct folio *src, enum migrate_mode mode); | 
|---|
| 1252 | #else | 
|---|
| 1253 | #define filemap_migrate_folio NULL | 
|---|
| 1254 | #endif | 
|---|
| 1255 | void folio_end_private_2(struct folio *folio); | 
|---|
| 1256 | void folio_wait_private_2(struct folio *folio); | 
|---|
| 1257 | int folio_wait_private_2_killable(struct folio *folio); | 
|---|
| 1258 |  | 
|---|
| 1259 | /* | 
|---|
| 1260 | * Fault in userspace address range. | 
|---|
| 1261 | */ | 
|---|
| 1262 | size_t fault_in_writeable(char __user *uaddr, size_t size); | 
|---|
| 1263 | size_t fault_in_subpage_writeable(char __user *uaddr, size_t size); | 
|---|
| 1264 | size_t fault_in_safe_writeable(const char __user *uaddr, size_t size); | 
|---|
| 1265 | size_t fault_in_readable(const char __user *uaddr, size_t size); | 
|---|
| 1266 |  | 
|---|
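|  | /* |
|---|
|  |  * Example (editor's sketch): copy loops typically alternate copying with |
|---|
|  |  * faulting-in, because pages can be reclaimed between the two steps.  A |
|---|
|  |  * simplified retry loop; real callers also track partial progress: |
|---|
|  |  * |
|---|
|  |  *	while (copy_from_user(dst, uaddr, len)) { |
|---|
|  |  *		if (fault_in_readable(uaddr, len) == len) |
|---|
|  |  *			return -EFAULT;	// nothing could be faulted in |
|---|
|  |  *	} |
|---|
|  |  */ |
|---|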
| 1267 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | 
|---|
| 1268 | pgoff_t index, gfp_t gfp); | 
|---|
| 1269 | int filemap_add_folio(struct address_space *mapping, struct folio *folio, | 
|---|
| 1270 | pgoff_t index, gfp_t gfp); | 
|---|
| 1271 | void filemap_remove_folio(struct folio *folio); | 
|---|
| 1272 | void __filemap_remove_folio(struct folio *folio, void *shadow); | 
|---|
| 1273 | void replace_page_cache_folio(struct folio *old, struct folio *new); | 
|---|
| 1274 | void delete_from_page_cache_batch(struct address_space *mapping, | 
|---|
| 1275 | struct folio_batch *fbatch); | 
|---|
| 1276 | bool filemap_release_folio(struct folio *folio, gfp_t gfp); | 
|---|
| 1277 | loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end, | 
|---|
| 1278 | int whence); | 
|---|
| 1279 |  | 
|---|
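|  | /* |
|---|
|  |  * Example (editor's sketch): a typical allocate-and-insert sequence.  On |
|---|
|  |  * success the folio is left locked and in the page cache: |
|---|
|  |  * |
|---|
|  |  *	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0); |
|---|
|  |  *	if (!folio) |
|---|
|  |  *		return -ENOMEM; |
|---|
|  |  *	err = filemap_add_folio(mapping, folio, index, GFP_KERNEL); |
|---|
|  |  *	if (err) { |
|---|
|  |  *		folio_put(folio); |
|---|
|  |  *		return err;	// -EEXIST if another folio got there first |
|---|
|  |  *	} |
|---|
|  |  */ |
|---|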
| 1280 | /* Must be non-static for BPF error injection */ | 
|---|
| 1281 | int __filemap_add_folio(struct address_space *mapping, struct folio *folio, | 
|---|
| 1282 | pgoff_t index, gfp_t gfp, void **shadowp); | 
|---|
| 1283 |  | 
|---|
| 1284 | bool filemap_range_has_writeback(struct address_space *mapping, | 
|---|
| 1285 | loff_t start_byte, loff_t end_byte); | 
|---|
| 1286 |  | 
|---|
| 1287 | /** | 
|---|
| 1288 | * filemap_range_needs_writeback - check if range potentially needs writeback | 
|---|
| 1289 | * @mapping:           address space within which to check | 
|---|
| 1290 | * @start_byte:        offset in bytes where the range starts | 
|---|
| 1291 | * @end_byte:          offset in bytes where the range ends (inclusive) | 
|---|
| 1292 | * | 
|---|
| 1293 | * Find at least one page in the range supplied, usually used to check if | 
|---|
| 1294 | * direct writing in this range will trigger a writeback. Used by O_DIRECT | 
|---|
| 1295 | * read/write with IOCB_NOWAIT, to see if the caller needs to do | 
|---|
| 1296 | * filemap_write_and_wait_range() before proceeding. | 
|---|
| 1297 | * | 
|---|
| 1298 | * Return: %true if the caller should do filemap_write_and_wait_range() before | 
|---|
| 1299 | * doing O_DIRECT to a page in this range, %false otherwise. | 
|---|
| 1300 | */ | 
|---|
| 1301 | static inline bool filemap_range_needs_writeback(struct address_space *mapping, | 
|---|
| 1302 | loff_t start_byte, | 
|---|
| 1303 | loff_t end_byte) | 
|---|
| 1304 | { | 
|---|
| 1305 | if (!mapping->nrpages) | 
|---|
| 1306 | return false; | 
|---|
| 1307 | if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && | 
|---|
| 1308 | !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) | 
|---|
| 1309 | return false; | 
|---|
| 1310 | return filemap_range_has_writeback(mapping, start_byte, end_byte); | 
|---|
| 1311 | } | 
|---|
| 1312 |  | 
|---|
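|  | /* |
|---|
|  |  * Example (editor's sketch): an IOCB_NOWAIT direct read bails out rather |
|---|
|  |  * than flushing inline, while the blocking path writes back and waits: |
|---|
|  |  * |
|---|
|  |  *	if (iocb->ki_flags & IOCB_NOWAIT) { |
|---|
|  |  *		if (filemap_range_needs_writeback(mapping, pos, end)) |
|---|
|  |  *			return -EAGAIN; |
|---|
|  |  *	} else { |
|---|
|  |  *		err = filemap_write_and_wait_range(mapping, pos, end); |
|---|
|  |  *		if (err) |
|---|
|  |  *			return err; |
|---|
|  |  *	} |
|---|
|  |  */ |
|---|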
| 1313 | /** | 
|---|
| 1314 | * struct readahead_control - Describes a readahead request. | 
|---|
| 1315 | * | 
|---|
| 1316 | * A readahead request is for consecutive pages.  Filesystems which | 
|---|
| 1317 | * implement the ->readahead method should call readahead_folio() or | 
|---|
| 1318 | * __readahead_batch() in a loop and attempt to start reads into each | 
|---|
| 1319 | * folio in the request. | 
|---|
| 1320 | * | 
|---|
| 1321 | * Most of the fields in this struct are private and should be accessed | 
|---|
| 1322 | * by the functions below. | 
|---|
| 1323 | * | 
|---|
| 1324 | * @file: The file, used primarily by network filesystems for authentication. | 
|---|
| 1325 | *	  May be NULL if invoked internally by the filesystem. | 
|---|
| 1326 | * @mapping: Readahead this filesystem object. | 
|---|
| 1327 | * @ra: File readahead state.  May be NULL. | 
|---|
| 1328 | */ | 
|---|
| 1329 | struct readahead_control { | 
|---|
| 1330 | struct file *file; | 
|---|
| 1331 | struct address_space *mapping; | 
|---|
| 1332 | struct file_ra_state *ra; | 
|---|
| 1333 | /* private: use the readahead_* accessors instead */ | 
|---|
| 1334 | pgoff_t _index; | 
|---|
| 1335 | unsigned int _nr_pages; | 
|---|
| 1336 | unsigned int _batch_count; | 
|---|
| 1337 | bool dropbehind; | 
|---|
| 1338 | bool _workingset; | 
|---|
| 1339 | unsigned long _pflags; | 
|---|
| 1340 | }; | 
|---|
| 1341 |  | 
|---|
| 1342 | #define DEFINE_READAHEAD(ractl, f, r, m, i)				\ | 
|---|
| 1343 | struct readahead_control ractl = {				\ | 
|---|
| 1344 | .file = f,						\ | 
|---|
| 1345 | .mapping = m,						\ | 
|---|
| 1346 | .ra = r,						\ | 
|---|
| 1347 | ._index = i,						\ | 
|---|
| 1348 | } | 
|---|
| 1349 |  | 
|---|
| 1350 | #define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE) | 
|---|
| 1351 |  | 
|---|
| 1352 | void page_cache_ra_unbounded(struct readahead_control *, | 
|---|
| 1353 | unsigned long nr_to_read, unsigned long lookahead_count); | 
|---|
| 1354 | void page_cache_sync_ra(struct readahead_control *, unsigned long req_count); | 
|---|
| 1355 | void page_cache_async_ra(struct readahead_control *, struct folio *, | 
|---|
| 1356 | unsigned long req_count); | 
|---|
| 1357 | void readahead_expand(struct readahead_control *ractl, | 
|---|
| 1358 | loff_t new_start, size_t new_len); | 
|---|
| 1359 |  | 
|---|
| 1360 | /** | 
|---|
| 1361 | * page_cache_sync_readahead - generic file readahead | 
|---|
| 1362 | * @mapping: address_space which holds the pagecache and I/O vectors | 
|---|
| 1363 | * @ra: file_ra_state which holds the readahead state | 
|---|
| 1364 | * @file: Used by the filesystem for authentication. | 
|---|
| 1365 | * @index: Index of first page to be read. | 
|---|
| 1366 | * @req_count: Total number of pages being read by the caller. | 
|---|
| 1367 | * | 
|---|
| 1368 | * page_cache_sync_readahead() should be called when a cache miss happened: | 
|---|
| 1369 | * it will submit the read.  The readahead logic may decide to piggyback more | 
|---|
| 1370 | * pages onto the read request if access patterns suggest it will improve | 
|---|
| 1371 | * performance. | 
|---|
| 1372 | */ | 
|---|
| 1373 | static inline | 
|---|
| 1374 | void page_cache_sync_readahead(struct address_space *mapping, | 
|---|
| 1375 | struct file_ra_state *ra, struct file *file, pgoff_t index, | 
|---|
| 1376 | unsigned long req_count) | 
|---|
| 1377 | { | 
|---|
| 1378 | DEFINE_READAHEAD(ractl, file, ra, mapping, index); | 
|---|
| 1379 | page_cache_sync_ra(&ractl, req_count); | 
|---|
| 1380 | } | 
|---|
| 1381 |  | 
|---|
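|  | /* |
|---|
|  |  * Example (editor's sketch; "last_index" is a hypothetical bound): a |
|---|
|  |  * buffered read that misses the cache starts synchronous readahead and |
|---|
|  |  * then repeats the lookup: |
|---|
|  |  * |
|---|
|  |  *	folio = filemap_get_folio(mapping, index); |
|---|
|  |  *	if (IS_ERR(folio)) { |
|---|
|  |  *		page_cache_sync_readahead(mapping, &file->f_ra, file, |
|---|
|  |  *					  index, last_index - index); |
|---|
|  |  *		folio = filemap_get_folio(mapping, index); |
|---|
|  |  *	} |
|---|
|  |  */ |
|---|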
| 1382 | /** | 
|---|
| 1383 | * page_cache_async_readahead - file readahead for marked pages | 
|---|
| 1384 | * @mapping: address_space which holds the pagecache and I/O vectors | 
|---|
| 1385 | * @ra: file_ra_state which holds the readahead state | 
|---|
| 1386 | * @file: Used by the filesystem for authentication. | 
|---|
| 1387 | * @folio: The folio which triggered the readahead call. | 
|---|
| 1388 | * @req_count: Total number of pages being read by the caller. | 
|---|
| 1389 | * | 
|---|
| 1390 | * page_cache_async_readahead() should be called when a page is used which | 
|---|
| 1391 | * is marked as PageReadahead; this is a marker to suggest that the application | 
|---|
| 1392 | * has used up enough of the readahead window that we should start pulling in | 
|---|
| 1393 | * more pages. | 
|---|
| 1394 | */ | 
|---|
| 1395 | static inline | 
|---|
| 1396 | void page_cache_async_readahead(struct address_space *mapping, | 
|---|
| 1397 | struct file_ra_state *ra, struct file *file, | 
|---|
| 1398 | struct folio *folio, unsigned long req_count) | 
|---|
| 1399 | { | 
|---|
| 1400 | DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index); | 
|---|
| 1401 | page_cache_async_ra(&ractl, folio, req_count); | 
|---|
| 1402 | } | 
|---|
| 1403 |  | 
|---|
| 1404 | static inline struct folio *__readahead_folio(struct readahead_control *ractl) | 
|---|
| 1405 | { | 
|---|
| 1406 | struct folio *folio; | 
|---|
| 1407 |  | 
|---|
| 1408 | BUG_ON(ractl->_batch_count > ractl->_nr_pages); | 
|---|
| 1409 | ractl->_nr_pages -= ractl->_batch_count; | 
|---|
| 1410 | ractl->_index += ractl->_batch_count; | 
|---|
| 1411 |  | 
|---|
| 1412 | if (!ractl->_nr_pages) { | 
|---|
| 1413 | ractl->_batch_count = 0; | 
|---|
| 1414 | return NULL; | 
|---|
| 1415 | } | 
|---|
| 1416 |  | 
|---|
| 1417 | folio = xa_load(&ractl->mapping->i_pages, ractl->_index); | 
|---|
| 1418 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); | 
|---|
| 1419 | ractl->_batch_count = folio_nr_pages(folio); | 
|---|
| 1420 |  | 
|---|
| 1421 | return folio; | 
|---|
| 1422 | } | 
|---|
| 1423 |  | 
|---|
| 1424 | /** | 
|---|
| 1425 | * readahead_folio - Get the next folio to read. | 
|---|
| 1426 | * @ractl: The current readahead request. | 
|---|
| 1427 | * | 
|---|
| 1428 | * Context: The folio is locked.  The caller should unlock the folio once | 
|---|
| 1429 | * all I/O to that folio has completed. | 
|---|
| 1430 | * Return: A pointer to the next folio, or %NULL if we are done. | 
|---|
| 1431 | */ | 
|---|
| 1432 | static inline struct folio *readahead_folio(struct readahead_control *ractl) | 
|---|
| 1433 | { | 
|---|
| 1434 | struct folio *folio = __readahead_folio(ractl); | 
|---|
| 1435 |  | 
|---|
| 1436 | if (folio) | 
|---|
| 1437 | folio_put(folio); | 
|---|
| 1438 | return folio; | 
|---|
| 1439 | } | 
|---|
| 1440 |  | 
|---|
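|  | /* |
|---|
|  |  * Example (editor's sketch): the loop the struct readahead_control |
|---|
|  |  * documentation asks ->readahead implementations to run.  Each folio is |
|---|
|  |  * returned locked with its reference already dropped, so the filesystem |
|---|
|  |  * only needs to unlock it (eg via folio_end_read()) when I/O completes. |
|---|
|  |  * "myfs_start_read" is a hypothetical helper: |
|---|
|  |  * |
|---|
|  |  *	static void myfs_readahead(struct readahead_control *ractl) |
|---|
|  |  *	{ |
|---|
|  |  *		struct folio *folio; |
|---|
|  |  * |
|---|
|  |  *		while ((folio = readahead_folio(ractl)) != NULL) |
|---|
|  |  *			myfs_start_read(folio); |
|---|
|  |  *	} |
|---|
|  |  */ |
|---|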
| 1441 | static inline unsigned int __readahead_batch(struct readahead_control *rac, | 
|---|
| 1442 | struct page **array, unsigned int array_sz) | 
|---|
| 1443 | { | 
|---|
| 1444 | unsigned int i = 0; | 
|---|
| 1445 | XA_STATE(xas, &rac->mapping->i_pages, 0); | 
|---|
| 1446 | struct folio *folio; | 
|---|
| 1447 |  | 
|---|
| 1448 | BUG_ON(rac->_batch_count > rac->_nr_pages); | 
|---|
| 1449 | rac->_nr_pages -= rac->_batch_count; | 
|---|
| 1450 | rac->_index += rac->_batch_count; | 
|---|
| 1451 | rac->_batch_count = 0; | 
|---|
| 1452 |  | 
|---|
| 1453 | xas_set(&xas, rac->_index); | 
|---|
| 1454 | rcu_read_lock(); | 
|---|
| 1455 | xas_for_each(&xas, folio, rac->_index + rac->_nr_pages - 1) { | 
|---|
| 1456 | if (xas_retry(&xas, folio)) | 
|---|
| 1457 | continue; | 
|---|
| 1458 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); | 
|---|
| 1459 | array[i++] = folio_page(folio, 0); | 
|---|
| 1460 | rac->_batch_count += folio_nr_pages(folio); | 
|---|
| 1461 | if (i == array_sz) | 
|---|
| 1462 | break; | 
|---|
| 1463 | } | 
|---|
| 1464 | rcu_read_unlock(); | 
|---|
| 1465 |  | 
|---|
| 1466 | return i; | 
|---|
| 1467 | } | 
|---|
| 1468 |  | 
|---|
| 1469 | /** | 
|---|
| 1470 | * readahead_pos - The byte offset into the file of this readahead request. | 
|---|
| 1471 | * @rac: The readahead request. | 
|---|
| 1472 | */ | 
|---|
| 1473 | static inline loff_t readahead_pos(const struct readahead_control *rac) | 
|---|
| 1474 | { | 
|---|
| 1475 | return (loff_t)rac->_index * PAGE_SIZE; | 
|---|
| 1476 | } | 
|---|
| 1477 |  | 
|---|
| 1478 | /** | 
|---|
| 1479 | * readahead_length - The number of bytes in this readahead request. | 
|---|
| 1480 | * @rac: The readahead request. | 
|---|
| 1481 | */ | 
|---|
| 1482 | static inline size_t readahead_length(const struct readahead_control *rac) | 
|---|
| 1483 | { | 
|---|
| 1484 | return rac->_nr_pages * PAGE_SIZE; | 
|---|
| 1485 | } | 
|---|
| 1486 |  | 
|---|
| 1487 | /** | 
|---|
| 1488 | * readahead_index - The index of the first page in this readahead request. | 
|---|
| 1489 | * @rac: The readahead request. | 
|---|
| 1490 | */ | 
|---|
| 1491 | static inline pgoff_t readahead_index(const struct readahead_control *rac) | 
|---|
| 1492 | { | 
|---|
| 1493 | return rac->_index; | 
|---|
| 1494 | } | 
|---|
| 1495 |  | 
|---|
| 1496 | /** | 
|---|
| 1497 | * readahead_count - The number of pages in this readahead request. | 
|---|
| 1498 | * @rac: The readahead request. | 
|---|
| 1499 | */ | 
|---|
| 1500 | static inline unsigned int readahead_count(const struct readahead_control *rac) | 
|---|
| 1501 | { | 
|---|
| 1502 | return rac->_nr_pages; | 
|---|
| 1503 | } | 
|---|
| 1504 |  | 
|---|
| 1505 | /** | 
|---|
| 1506 | * readahead_batch_length - The number of bytes in the current batch. | 
|---|
| 1507 | * @rac: The readahead request. | 
|---|
| 1508 | */ | 
|---|
| 1509 | static inline size_t readahead_batch_length(const struct readahead_control *rac) | 
|---|
| 1510 | { | 
|---|
| 1511 | return rac->_batch_count * PAGE_SIZE; | 
|---|
| 1512 | } | 
|---|
| 1513 |  | 
|---|
| 1514 | static inline unsigned long dir_pages(const struct inode *inode) | 
|---|
| 1515 | { | 
|---|
| 1516 | return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> | 
|---|
| 1517 | PAGE_SHIFT; | 
|---|
| 1518 | } | 
|---|
| 1519 |  | 
|---|
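|  | /* |
|---|
|  |  * Example: with 4KiB pages, an 8193-byte directory rounds up to |
|---|
|  |  * dir_pages() == 3, while an 8192-byte one is exactly 2. |
|---|
|  |  */ |
|---|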
| 1520 | /** | 
|---|
| 1521 | * folio_mkwrite_check_truncate - check if folio was truncated | 
|---|
| 1522 | * @folio: the folio to check | 
|---|
| 1523 | * @inode: the inode to check the folio against | 
|---|
| 1524 | * | 
|---|
| 1525 | * Return: the number of bytes in the folio up to EOF, | 
|---|
| 1526 | * or -EFAULT if the folio was truncated. | 
|---|
| 1527 | */ | 
|---|
| 1528 | static inline ssize_t folio_mkwrite_check_truncate(const struct folio *folio, | 
|---|
| 1529 | const struct inode *inode) | 
|---|
| 1530 | { | 
|---|
| 1531 | loff_t size = i_size_read(inode); | 
|---|
| 1532 | pgoff_t index = size >> PAGE_SHIFT; | 
|---|
| 1533 | size_t offset = offset_in_folio(folio, size); | 
|---|
| 1534 |  | 
|---|
| 1535 | if (!folio->mapping) | 
|---|
| 1536 | return -EFAULT; | 
|---|
| 1537 |  | 
|---|
| 1538 | /* folio is wholly inside EOF */ | 
|---|
| 1539 | if (folio_next_index(folio) - 1 < index) | 
|---|
| 1540 | return folio_size(folio); | 
|---|
| 1541 | /* folio is wholly past EOF */ | 
|---|
| 1542 | if (folio->index > index || !offset) | 
|---|
| 1543 | return -EFAULT; | 
|---|
| 1544 | /* folio is partially inside EOF */ | 
|---|
| 1545 | return offset; | 
|---|
| 1546 | } | 
|---|
| 1547 |  | 
|---|
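|  | /* |
|---|
|  |  * Example (editor's sketch): a ->page_mkwrite() handler locks the folio |
|---|
|  |  * and then validates it against a concurrent truncate: |
|---|
|  |  * |
|---|
|  |  *	folio_lock(folio); |
|---|
|  |  *	ret = folio_mkwrite_check_truncate(folio, inode); |
|---|
|  |  *	if (ret < 0) { |
|---|
|  |  *		folio_unlock(folio); |
|---|
|  |  *		return VM_FAULT_NOPAGE;	// the folio was truncated |
|---|
|  |  *	} |
|---|
|  |  *	// "ret" bytes of the folio lie inside i_size |
|---|
|  |  */ |
|---|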
| 1548 | /** | 
|---|
| 1549 | * i_blocks_per_folio - How many blocks fit in this folio. | 
|---|
| 1550 | * @inode: The inode which contains the blocks. | 
|---|
| 1551 | * @folio: The folio. | 
|---|
| 1552 | * | 
|---|
| 1553 | * If the block size is larger than the size of this folio, return zero. | 
|---|
| 1554 | * | 
|---|
| 1555 | * Context: The caller should hold a refcount on the folio to prevent it | 
|---|
| 1556 | * from being split. | 
|---|
| 1557 | * Return: The number of filesystem blocks covered by this folio. | 
|---|
| 1558 | */ | 
|---|
| 1559 | static inline | 
|---|
| 1560 | unsigned int i_blocks_per_folio(const struct inode *inode, | 
|---|
| 1561 | const struct folio *folio) | 
|---|
| 1562 | { | 
|---|
| 1563 | return folio_size(folio) >> inode->i_blkbits; | 
|---|
| 1564 | } | 
|---|
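|  | /* |
|---|
|  |  * Example: a 16KiB folio on a filesystem with 4KiB blocks (i_blkbits == 12) |
|---|
|  |  * gives i_blocks_per_folio() == 4; with 64KiB blocks it returns 0, as |
|---|
|  |  * noted above. |
|---|
|  |  */ |
|---|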
| 1565 | #endif /* _LINUX_PAGEMAP_H */ | 
|---|
| 1566 |  | 
|---|