// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2023 Christoph Hellwig.
 */
#include <linux/iomap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"
/*
 * Structure allocated for each folio to track per-block uptodate, dirty state
 * and I/O completions.
 */
struct iomap_folio_state {
	spinlock_t		state_lock;
	unsigned int		read_bytes_pending;
	atomic_t		write_bytes_pending;

	/*
	 * Each block has two bits in this bitmap:
	 * Bits [0..blocks_per_folio) hold the uptodate status,
	 * bits [blocks_per_folio..2*blocks_per_folio) hold the dirty status.
	 */
	unsigned long		state[];
};

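/*
 * Example layout: a 16k folio on a filesystem with 4k blocks has four
 * blocks per folio, so bit 2 is the uptodate state of block 2 and bit
 * 4 + 2 == 6 is its dirty state; state[] is sized for 2 * blocks_per_folio
 * bits in ifs_alloc() below.
 */
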
static inline bool ifs_is_fully_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}

static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
		unsigned int block)
{
	return test_bit(block, ifs->state);
}

static bool ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;

	bitmap_set(ifs->state, first_blk, nr_blks);
	return ifs_is_fully_uptodate(folio, ifs);
}

static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned long flags;
	bool uptodate = true;

	if (folio_test_uptodate(folio))
		return;

	if (ifs) {
		spin_lock_irqsave(&ifs->state_lock, flags);
		uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (uptodate)
		folio_mark_uptodate(folio);
}
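
/*
 * Worked example of the off/len to block conversion above: with 1k blocks
 * (i_blkbits == 10), off == 512 and len == 2048 give first_blk == 0,
 * last_blk == (512 + 2048 - 1) >> 10 == 2 and nr_blks == 3, i.e. the range
 * is rounded outwards so that partially covered blocks at either end are
 * marked as well.
 */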

static inline bool ifs_block_is_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, int block)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);

	return test_bit(block + blks_per_folio, ifs->state);
}

static unsigned ifs_find_dirty_range(struct folio *folio,
		struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
{
	struct inode *inode = folio->mapping->host;
	unsigned start_blk =
		offset_in_folio(folio, *range_start) >> inode->i_blkbits;
	unsigned end_blk = min_not_zero(
		offset_in_folio(folio, range_end) >> inode->i_blkbits,
		i_blocks_per_folio(inode, folio));
	unsigned nblks = 1;

	while (!ifs_block_is_dirty(folio, ifs, start_blk))
		if (++start_blk == end_blk)
			return 0;

	while (start_blk + nblks < end_blk) {
		if (!ifs_block_is_dirty(folio, ifs, start_blk + nblks))
			break;
		nblks++;
	}

	*range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
	return nblks << inode->i_blkbits;
}

static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
		u64 range_end)
{
	struct iomap_folio_state *ifs = folio->private;

	if (*range_start >= range_end)
		return 0;

	if (ifs)
		return ifs_find_dirty_range(folio, ifs, range_start, range_end);
	return range_end - *range_start;
}
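
/*
 * Example: with 4k blocks and per-folio dirty bits 0110 (blocks 1 and 2
 * dirty), a scan starting at the folio start advances *range_start to
 * block 1 and returns 2 << i_blkbits, the byte length of the first
 * contiguous dirty run; callers loop to pick up any later runs.
 */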

static void ifs_clear_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_clear_range_dirty(folio, ifs, off, len);
}

static void ifs_set_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_dirty(folio, ifs, off, len);
}

static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	/*
	 * ifs->state tracks two sets of state flags when the
	 * filesystem block size is smaller than the folio size.
	 * The first state tracks per-block uptodate and the
	 * second tracks per-block dirty state.
	 */
	ifs = kzalloc(struct_size(ifs, state,
		      BITS_TO_LONGS(2 * nr_blocks)), gfp);
	if (!ifs)
		return ifs;

	spin_lock_init(&ifs->state_lock);
	if (folio_test_uptodate(folio))
		bitmap_set(ifs->state, 0, nr_blocks);
	if (folio_test_dirty(folio))
		bitmap_set(ifs->state, nr_blocks, nr_blocks);
	folio_attach_private(folio, ifs);

	return ifs;
}

static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(ifs->read_bytes_pending != 0);
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}
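
/*
 * Sizing example for ifs_alloc(): a 2M folio with 4k blocks has 512 blocks,
 * so state[] needs BITS_TO_LONGS(1024) == 16 unsigned longs on a 64-bit
 * machine.  When the block size equals the folio size no ifs is attached
 * at all and folio->private stays NULL.
 */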

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	size_t orig_plen = plen;
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!ifs_block_is_uptodate(ifs, i))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		while (++i <= last) {
			if (ifs_block_is_uptodate(ifs, i)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
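
/*
 * Example: for a 16k folio with 4k blocks where only blocks 0 and 3 are
 * already uptodate, a read of the whole folio is trimmed to poff == 4096
 * and plen == 8192, i.e. just blocks 1 and 2 are read from disk.
 */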

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t offset = offset_in_folio(folio, iomap->offset);

	if (WARN_ON_ONCE(!iomap->inline_data))
		return -EIO;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	folio_fill_tail(folio, offset, iomap->inline_data, size);
	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
	return 0;
}

#ifdef CONFIG_BLOCK
static void iomap_finish_folio_read(struct folio *folio, size_t off,
		size_t len, int error)
{
	struct iomap_folio_state *ifs = folio->private;
	bool uptodate = !error;
	bool finished = true;

	if (ifs) {
		unsigned long flags;

		spin_lock_irqsave(&ifs->state_lock, flags);
		if (!error)
			uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		ifs->read_bytes_pending -= len;
		finished = !ifs->read_bytes_pending;
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (finished)
		folio_end_read(folio, uptodate);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

static int iomap_readpage_iter(struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	struct folio *folio = ctx->cur_folio;
	struct iomap_folio_state *ifs;
	size_t poff, plen;
	sector_t sector;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_read_inline_data(iter, folio);
		if (ret)
			return ret;
		return iomap_iter_advance(iter, &length);
	}

	/* zero post-eof blocks as the page may be mapped */
	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (ifs) {
		spin_lock_irq(&ifs->state_lock);
		ifs->read_bytes_pending += plen;
		spin_unlock_irq(&ifs->state_lock);
	}

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	length = pos - iter->pos + plen;
	return iomap_iter_advance(iter, &length);
}

static int iomap_read_folio_iter(struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	int ret;

	while (iomap_length(iter)) {
		ret = iomap_readpage_iter(iter, ctx);
		if (ret)
			return ret;
	}

	return 0;
}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_read_folio_iter(&iter, &ctx);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);
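
/*
 * Typical caller (sketch, with an illustrative ops name): a filesystem wires
 * this up as its ->read_folio address_space operation, e.g.:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return iomap_read_folio(folio, &myfs_iomap_ops);
 *	}
 */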

static int iomap_readahead_iter(struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	int ret;

	while (iomap_length(iter)) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (e.g. to read the addresses of
 * blocks from disk), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.status = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

static int iomap_read_folio_range(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
	return submit_bio_wait(&bio);
}
#else
static int iomap_read_folio_range(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	WARN_ON_ONCE(1);
	return -EIO;
}
#endif /* CONFIG_BLOCK */

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!ifs_block_is_uptodate(ifs, i))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	if (iter->flags & IOMAP_DONTCACHE)
		fgp |= FGP_DONTCACHE;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty.  Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	size_t len = folio_size(folio);

	ifs_alloc(inode, folio, 0);
	iomap_set_range_dirty(folio, 0, len);
	return filemap_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL_GPL(iomap_dirty_folio);

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
				pos + len - 1);
}

static int __iomap_write_begin(const struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, size_t len,
		struct folio *folio)
{
	struct iomap_folio_state *ifs;
	loff_t pos = iter->pos;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	/*
	 * If the write or zeroing completely overlaps the current folio, then
	 * the entire folio will be dirtied so there is no need for
	 * per-block state tracking structures to be attached to this folio.
	 * For the unshare case, we must read in the ondisk contents because we
	 * are not changing pagecache contents.
	 */
	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
	    pos + len >= folio_pos(folio) + folio_size(folio))
		return 0;

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	if (folio_test_uptodate(folio))
		return 0;

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			if (write_ops && write_ops->read_folio_range)
				status = write_ops->read_folio_range(iter,
						folio, block_start, plen);
			else
				status = iomap_read_folio_range(iter,
						folio, block_start, plen);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}
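
/*
 * Example: a 512-byte write at pos 1024 into a freshly allocated
 * (IOMAP_F_NEW) 4k block zeroes bytes [0, 1024) and [1536, 4096) of the
 * block via folio_zero_segments() and marks it uptodate, so nothing stale
 * is ever exposed; for an existing mapped block the same range is read in
 * from disk instead.
 */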

static struct folio *__iomap_get_folio(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, size_t len)
{
	loff_t pos = iter->pos;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	if (write_ops && write_ops->get_folio)
		return write_ops->get_folio(iter, pos, len);
	return iomap_get_folio(iter, pos, len);
}

static void __iomap_put_folio(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, size_t ret,
		struct folio *folio)
{
	loff_t pos = iter->pos;

	if (write_ops && write_ops->put_folio) {
		write_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

/* trim pos and bytes to within a given folio */
static loff_t iomap_trim_folio_range(struct iomap_iter *iter,
		struct folio *folio, size_t *offset, u64 *bytes)
{
	loff_t pos = iter->pos;
	size_t fsize = folio_size(folio);

	WARN_ON_ONCE(pos < folio_pos(folio));
	WARN_ON_ONCE(pos >= folio_pos(folio) + fsize);

	*offset = offset_in_folio(folio, pos);
	*bytes = min(*bytes, fsize - *offset);

	return pos;
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

/*
 * Grab and prepare a folio for write based on iter state.  Returns the folio,
 * offset, and length.  Callers can optionally pass a maximum length via *plen;
 * otherwise *plen must be initialised to zero.
 */
static int iomap_write_begin(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, struct folio **foliop,
		size_t *poffset, u64 *plen)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
	struct folio *folio;
	int status = 0;

	len = min_not_zero(len, *plen);
	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	folio = __iomap_get_folio(iter, write_ops, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (write_ops && write_ops->iomap_valid) {
		bool iomap_valid = write_ops->iomap_valid(iter->inode,
							  &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	pos = iomap_trim_folio_range(iter, folio, poffset, &len);

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, write_ops, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	*plen = len;
	return 0;

out_unlock:
	__iomap_put_folio(iter, write_ops, 0, folio);
	return status;
}
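
/*
 * Note on the stale-iomap handling above: returning 0 with IOMAP_F_STALE set
 * (after putting the folio) tells the surrounding iomap_iter() loop to
 * request a fresh mapping for the current position instead of failing the
 * operation.
 */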

static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return false;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
	filemap_dirty_folio(inode->i_mapping, folio);
	return true;
}

static bool iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	if (WARN_ON_ONCE(!iomap->inline_data))
		return false;

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return true;
}

/*
 * Returns true if all copied bytes have been written to the pagecache,
 * otherwise false.
 */
static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
		struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;

	if (srcmap->type == IOMAP_INLINE)
		return iomap_write_end_inline(iter, folio, pos, copied);

	if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		size_t bh_written;

		bh_written = block_write_end(pos, len, copied, folio);
		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
		return bh_written == copied;
	}

	return __iomap_write_end(iter->inode, pos, len, copied, folio);
}

static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
		const struct iomap_write_ops *write_ops)
{
	ssize_t total_written = 0;
	int status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	size_t chunk = mapping_max_folio_size(mapping);
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		loff_t old_size;
		size_t offset;		/* Offset into folio */
		u64 bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */
		u64 written;		/* Bytes actually written */
		loff_t pos;

		bytes = iov_iter_count(i);
retry:
		offset = iter->pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		status = balance_dirty_pages_ratelimited_flags(mapping,
				bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > iomap_length(iter))
			bytes = iomap_length(iter);

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, write_ops, &folio, &offset,
					   &bytes);
		if (unlikely(status)) {
			iomap_write_failed(iter->inode, iter->pos, bytes);
			break;
		}
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		pos = iter->pos;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		written = iomap_write_end(iter, bytes, copied, folio) ?
			  copied : 0;

		/*
		 * Update the in-memory inode size after copying the data into
		 * the page cache.  It's up to the file system to write the
		 * updated size to disk, preferably after I/O completion so that
		 * no stale data is exposed.  Only once that's done can we
		 * unlock and release the folio.
		 */
		old_size = iter->inode->i_size;
		if (pos + written > old_size) {
			i_size_write(iter->inode, pos + written);
			iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
		}
		__iomap_put_folio(iter, write_ops, written, folio);

		if (old_size < pos)
			pagecache_isize_extended(iter->inode, old_size, pos);

		cond_resched();
		if (unlikely(written == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			iomap_write_failed(iter->inode, pos, bytes);
			iov_iter_revert(i, copied);

			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
		} else {
			total_written += written;
			iomap_iter_advance(iter, &written);
		}
	} while (iov_iter_count(i) && iomap_length(iter));

	return total_written ? 0 : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
		.private	= private,
	};
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;
	if (iocb->ki_flags & IOCB_DONTCACHE)
		iter.flags |= IOMAP_DONTCACHE;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_write_iter(&iter, i, write_ops);

	if (unlikely(iter.pos == iocb->ki_pos))
		return ret;
	ret = iter.pos - iocb->ki_pos;
	iocb->ki_pos = iter.pos;
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
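
/*
 * Typical caller (sketch, with illustrative names): a filesystem's
 * ->write_iter takes the inode lock, runs its write checks, then does
 *
 *	ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops,
 *					NULL, NULL);
 *
 * A positive return is the number of bytes written with iocb->ki_pos
 * already advanced; a negative return is the usual errno.
 */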

static void iomap_write_delalloc_ifs_punch(struct inode *inode,
		struct folio *folio, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	unsigned int first_blk, last_blk, i;
	loff_t last_byte;
	u8 blkbits = inode->i_blkbits;
	struct iomap_folio_state *ifs;

	/*
	 * When we have per-block dirty tracking, there can be
	 * blocks within a folio which are marked uptodate
	 * but not dirty. In that case it is necessary to punch
	 * out such blocks to avoid leaking any delalloc blocks.
	 */
	ifs = folio->private;
	if (!ifs)
		return;

	last_byte = min_t(loff_t, end_byte - 1,
			folio_pos(folio) + folio_size(folio) - 1);
	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
	for (i = first_blk; i <= last_blk; i++) {
		if (!ifs_block_is_dirty(folio, ifs, i))
			punch(inode, folio_pos(folio) + (i << blkbits),
				    1 << blkbits, iomap);
	}
}

static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	if (!folio_test_dirty(folio))
		return;

	/* if dirty, punch up to offset */
	if (start_byte > *punch_start_byte) {
		punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
				iomap);
	}

	/* Punch non-dirty blocks within folio */
	iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
			iomap, punch);

	/*
	 * Make sure the next punch start is correctly bound to
	 * the end of this data range, not the end of the folio.
	 */
	*punch_start_byte = min_t(loff_t, end_byte,
				folio_pos(folio) + folio_size(folio));
}
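
/*
 * Example: for a dirty 16k folio whose per-block dirty bits are 1010
 * (blocks 0 and 2 dirty), iomap_write_delalloc_ifs_punch() punches out the
 * two clean 4k blocks individually while the dirty ones keep their delalloc
 * backing for writeback.
 */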

/*
 * Scan the data range passed to us for dirty page cache folios. If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start from.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they have been instantiated by read faults,
 * in which case they contain zeroes and we can remove the delalloc backing
 * range and any new writes to those pages will do the normal hole filling
 * operation...
 *
 * This makes the logic simple: we only need to keep the delalloc extents over
 * the dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static void iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	while (start_byte < end_byte) {
		struct folio	*folio;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (IS_ERR(folio)) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		iomap_write_delalloc_punch(inode, folio, punch_start_byte,
				start_byte, end_byte, iomap, punch);

		/* move offset to start of next folio in range */
		start_byte = folio_pos(folio) + folio_size(folio);
		folio_unlock(folio);
		folio_put(folio);
	}
}

/*
 * When a short write occurs, the filesystem might need to use ->iomap_end
 * to remove space reservations created in ->iomap_begin.
 *
 * For filesystems that use delayed allocation, there can be dirty pages over
 * the delalloc extent outside the range of a short write but still within the
 * delalloc extent allocated for this iomap if the write raced with page
 * faults.
 *
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it. It must skip over all other types of extents in the range and leave
 * them completely unchanged. It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes. This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, if they lie within the same folio or even
 * if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date. A write page fault can then mark it dirty. If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out. Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty. This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end). Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
| 1238 | void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte, | 
|---|
| 1239 | loff_t end_byte, unsigned flags, struct iomap *iomap, | 
|---|
| 1240 | iomap_punch_t punch) | 
|---|
| 1241 | { | 
|---|
| 1242 | loff_t punch_start_byte = start_byte; | 
|---|
| 1243 | loff_t scan_end_byte = min(i_size_read(inode), end_byte); | 
|---|
| 1244 |  | 
|---|
| 1245 | /* | 
|---|
| 1246 | * The caller must hold invalidate_lock to avoid races with page faults | 
|---|
| 1247 | * re-instantiating folios and dirtying them via ->page_mkwrite whilst | 
|---|
| 1248 | * we walk the cache and perform delalloc extent removal.  Failing to do | 
|---|
| 1249 | * this can leave dirty pages with no space reservation in the cache. | 
|---|
| 1250 | */ | 
|---|
| 1251 | lockdep_assert_held_write(&inode->i_mapping->invalidate_lock); | 
|---|
| 1252 |  | 
|---|
| 1253 | while (start_byte < scan_end_byte) { | 
|---|
| 1254 | loff_t		data_end; | 
|---|
| 1255 |  | 
|---|
| 1256 | start_byte = mapping_seek_hole_data(inode->i_mapping, | 
|---|
| 1257 | start_byte, scan_end_byte, SEEK_DATA); | 
|---|
| 1258 | /* | 
|---|
| 1259 | * If there is no more data to scan, all that is left is to | 
|---|
| 1260 | * punch out the remaining range. | 
|---|
| 1261 | * | 
|---|
| 1262 | * Note that mapping_seek_hole_data is only supposed to return | 
|---|
| 1263 | * either an offset or -ENXIO, so WARN on any other error as | 
|---|
| 1264 | * that would be an API change without updating the callers. | 
|---|
| 1265 | */ | 
|---|
| 1266 | if (start_byte == -ENXIO || start_byte == scan_end_byte) | 
|---|
| 1267 | break; | 
|---|
| 1268 | if (WARN_ON_ONCE(start_byte < 0)) | 
|---|
| 1269 | return; | 
|---|
| 1270 | WARN_ON_ONCE(start_byte < punch_start_byte); | 
|---|
| 1271 | WARN_ON_ONCE(start_byte > scan_end_byte); | 
|---|
| 1272 |  | 
|---|
| 1273 | /* | 
|---|
| 1274 | * We find the end of this contiguous cached data range by | 
|---|
| 1275 | * seeking from start_byte to the beginning of the next hole. | 
|---|
| 1276 | */ | 
|---|
| 1277 | data_end = mapping_seek_hole_data(inode->i_mapping, start_byte, | 
|---|
| 1278 | scan_end_byte, SEEK_HOLE); | 
|---|
| 1279 | if (WARN_ON_ONCE(data_end < 0)) | 
|---|
| 1280 | return; | 
|---|
| 1281 |  | 
|---|
| 1282 | /* | 
|---|
| 1283 | * If we race with post-direct I/O invalidation of the page cache, | 
|---|
| 1284 | * there might be no data left at start_byte. | 
|---|
| 1285 | */ | 
|---|
| 1286 | if (data_end == start_byte) | 
|---|
| 1287 | continue; | 
|---|
| 1288 |  | 
|---|
| 1289 | WARN_ON_ONCE(data_end < start_byte); | 
|---|
| 1290 | WARN_ON_ONCE(data_end > scan_end_byte); | 
|---|
| 1291 |  | 
|---|
| 1292 | iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte, | 
|---|
| 1293 | data_end, iomap, punch); | 
|---|
| 1294 |  | 
|---|
| 1295 | /* The next data search starts at the end of this one. */ | 
|---|
| 1296 | start_byte = data_end; | 
|---|
| 1297 | } | 
|---|
| 1298 |  | 
|---|
| 1299 | if (punch_start_byte < end_byte) | 
|---|
| 1300 | punch(inode, punch_start_byte, end_byte - punch_start_byte, | 
|---|
| 1301 | iomap); | 
|---|
| 1302 | } | 
|---|
| 1303 | EXPORT_SYMBOL_GPL(iomap_write_delalloc_release); | 
|---|
| 1304 |  | 
|---|
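To make the punch() contract above concrete, here is a minimal sketch of what such a callback might look like. Everything prefixed myfs_ (the extent type, the lookup and remove helpers, the per-inode extent lock) is a hypothetical placeholder, not a real kernel API; a real filesystem would use its own delalloc extent removal machinery here.

```c
/*
 * Hypothetical sketch of an iomap_punch_t callback.  All myfs_* names
 * are illustrative placeholders; assume myfs_extent_lookup() returns
 * the extent containing @pos, or NULL if @pos sits in a hole.
 */
static void myfs_punch_delalloc(struct inode *inode, loff_t pos,
		loff_t length, struct iomap *iomap)
{
	struct myfs_inode *mi = MYFS_I(inode);
	loff_t end = pos + length;

	/*
	 * We may be running under a folio lock taken by the caller, so
	 * only the internal allocation lock - the bottom of the
	 * documented lock order - may be taken here.
	 */
	spin_lock(&mi->extent_lock);
	while (pos < end) {
		struct myfs_extent *ext = myfs_extent_lookup(mi, pos);
		loff_t ext_end = ext ? min(ext->end, end) : end;

		/* Punch delalloc extents only; skip all other types. */
		if (ext && ext->type == MYFS_EXT_DELALLOC)
			myfs_extent_remove(mi, pos, ext_end - pos);
		pos = ext_end;
	}
	spin_unlock(&mi->extent_lock);
}
```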
| 1305 | static int iomap_unshare_iter(struct iomap_iter *iter, | 
|---|
| 1306 | const struct iomap_write_ops *write_ops) | 
|---|
| 1307 | { | 
|---|
| 1308 | struct iomap *iomap = &iter->iomap; | 
|---|
| 1309 | u64 bytes = iomap_length(iter); | 
|---|
| 1310 | int status; | 
|---|
| 1311 |  | 
|---|
| 1312 | if (!iomap_want_unshare_iter(iter)) | 
|---|
| 1313 | return iomap_iter_advance(iter, &bytes); | 
|---|
| 1314 |  | 
|---|
| 1315 | do { | 
|---|
| 1316 | struct folio *folio; | 
|---|
| 1317 | size_t offset; | 
|---|
| 1318 | bool ret; | 
|---|
| 1319 |  | 
|---|
| 1320 | bytes = min_t(u64, SIZE_MAX, bytes); | 
|---|
| 1321 | status = iomap_write_begin(iter, write_ops, &folio, &offset, | 
|---|
| 1322 | &bytes); | 
|---|
| 1323 | if (unlikely(status)) | 
|---|
| 1324 | return status; | 
|---|
| 1325 | if (iomap->flags & IOMAP_F_STALE) | 
|---|
| 1326 | break; | 
|---|
| 1327 |  | 
|---|
| 1328 | ret = iomap_write_end(iter, bytes, bytes, folio); | 
|---|
| 1329 | __iomap_put_folio(iter, write_ops, bytes, folio); | 
|---|
| 1330 | if (WARN_ON_ONCE(!ret)) | 
|---|
| 1331 | return -EIO; | 
|---|
| 1332 |  | 
|---|
| 1333 | cond_resched(); | 
|---|
| 1334 |  | 
|---|
| 1335 | balance_dirty_pages_ratelimited(iter->inode->i_mapping); | 
|---|
| 1336 |  | 
|---|
| 1337 | status = iomap_iter_advance(iter, &bytes); | 
|---|
| 1338 | if (status) | 
|---|
| 1339 | break; | 
|---|
| 1340 | } while (bytes > 0); | 
|---|
| 1341 |  | 
|---|
| 1342 | return status; | 
|---|
| 1343 | } | 
|---|
| 1344 |  | 
|---|
| 1345 | int | 
|---|
| 1346 | iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, | 
|---|
| 1347 | const struct iomap_ops *ops, | 
|---|
| 1348 | const struct iomap_write_ops *write_ops) | 
|---|
| 1349 | { | 
|---|
| 1350 | struct iomap_iter iter = { | 
|---|
| 1351 | .inode		= inode, | 
|---|
| 1352 | .pos		= pos, | 
|---|
| 1353 | .flags		= IOMAP_WRITE | IOMAP_UNSHARE, | 
|---|
| 1354 | }; | 
|---|
| 1355 | loff_t size = i_size_read(inode); | 
|---|
| 1356 | int ret; | 
|---|
| 1357 |  | 
|---|
| 1358 | if (pos < 0 || pos >= size) | 
|---|
| 1359 | return 0; | 
|---|
| 1360 |  | 
|---|
| 1361 | iter.len = min(len, size - pos); | 
|---|
| 1362 | while ((ret = iomap_iter(&iter, ops)) > 0) | 
|---|
| 1363 | iter.status = iomap_unshare_iter(&iter, write_ops); | 
|---|
| 1364 | return ret; | 
|---|
| 1365 | } | 
|---|
| 1366 | EXPORT_SYMBOL_GPL(iomap_file_unshare); | 
|---|
| 1367 |  | 
|---|
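As a usage sketch, a copy-on-write filesystem might drive iomap_file_unshare() from its fallocate(FALLOC_FL_UNSHARE_RANGE) handler roughly as below; myfs_iomap_ops and myfs_iomap_write_ops are assumed to be the filesystem's existing iomap_ops and iomap_write_ops instances, not real APIs.

```c
/* Sketch only: myfs_iomap_ops and myfs_iomap_write_ops are assumed. */
static long myfs_unshare_range(struct file *file, loff_t pos, loff_t len)
{
	struct inode *inode = file_inode(file);
	int error;

	inode_lock(inode);
	/* Break CoW sharing by dirtying the shared range in pagecache. */
	error = iomap_file_unshare(inode, pos, len, &myfs_iomap_ops,
			&myfs_iomap_write_ops);
	inode_unlock(inode);
	return error;
}
```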
| 1368 | /* | 
|---|
| 1369 | * Flush the remaining range of the iter and mark the current mapping stale. | 
|---|
| 1370 | * This is used when zero range sees an unwritten mapping that may have had | 
|---|
| 1371 | * dirty pagecache over it. | 
|---|
| 1372 | */ | 
|---|
| 1373 | static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i) | 
|---|
| 1374 | { | 
|---|
| 1375 | struct address_space *mapping = i->inode->i_mapping; | 
|---|
| 1376 | loff_t end = i->pos + i->len - 1; | 
|---|
| 1377 |  | 
|---|
| 1378 | i->iomap.flags |= IOMAP_F_STALE; | 
|---|
| 1379 | return filemap_write_and_wait_range(mapping, i->pos, end); | 
|---|
| 1380 | } | 
|---|
| 1381 |  | 
|---|
| 1382 | static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero, | 
|---|
| 1383 | const struct iomap_write_ops *write_ops) | 
|---|
| 1384 | { | 
|---|
| 1385 | u64 bytes = iomap_length(iter); | 
|---|
| 1386 | int status; | 
|---|
| 1387 |  | 
|---|
| 1388 | do { | 
|---|
| 1389 | struct folio *folio; | 
|---|
| 1390 | size_t offset; | 
|---|
| 1391 | bool ret; | 
|---|
| 1392 |  | 
|---|
| 1393 | bytes = min_t(u64, SIZE_MAX, bytes); | 
|---|
| 1394 | status = iomap_write_begin(iter, write_ops, &folio, &offset, | 
|---|
| 1395 | &bytes); | 
|---|
| 1396 | if (status) | 
|---|
| 1397 | return status; | 
|---|
| 1398 | if (iter->iomap.flags & IOMAP_F_STALE) | 
|---|
| 1399 | break; | 
|---|
| 1400 |  | 
|---|
| 1401 | /* warn about zeroing folios beyond eof that won't write back */ | 
|---|
| 1402 | WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size); | 
|---|
| 1403 |  | 
|---|
| 1404 | trace_iomap_zero_iter(iter->inode, folio_pos(folio) + offset, | 
|---|
| 1405 | bytes); | 
|---|
| 1406 |  | 
|---|
| 1407 | folio_zero_range(folio, offset, bytes); | 
|---|
| 1408 | folio_mark_accessed(folio); | 
|---|
| 1409 |  | 
|---|
| 1410 | ret = iomap_write_end(iter, bytes, bytes, folio); | 
|---|
| 1411 | __iomap_put_folio(iter, write_ops, bytes, folio); | 
|---|
| 1412 | if (WARN_ON_ONCE(!ret)) | 
|---|
| 1413 | return -EIO; | 
|---|
| 1414 |  | 
|---|
| 1415 | status = iomap_iter_advance(iter, &bytes); | 
|---|
| 1416 | if (status) | 
|---|
| 1417 | break; | 
|---|
| 1418 | } while (bytes > 0); | 
|---|
| 1419 |  | 
|---|
| 1420 | if (did_zero) | 
|---|
| 1421 | *did_zero = true; | 
|---|
| 1422 | return status; | 
|---|
| 1423 | } | 
|---|
| 1424 |  | 
|---|
| 1425 | int | 
|---|
| 1426 | iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, | 
|---|
| 1427 | const struct iomap_ops *ops, | 
|---|
| 1428 | const struct iomap_write_ops *write_ops, void *private) | 
|---|
| 1429 | { | 
|---|
| 1430 | struct iomap_iter iter = { | 
|---|
| 1431 | .inode		= inode, | 
|---|
| 1432 | .pos		= pos, | 
|---|
| 1433 | .len		= len, | 
|---|
| 1434 | .flags		= IOMAP_ZERO, | 
|---|
| 1435 | .private	= private, | 
|---|
| 1436 | }; | 
|---|
| 1437 | struct address_space *mapping = inode->i_mapping; | 
|---|
| 1438 | unsigned int blocksize = i_blocksize(inode); | 
|---|
| 1439 | unsigned int off = pos & (blocksize - 1); | 
|---|
| 1440 | loff_t plen = min_t(loff_t, len, blocksize - off); | 
|---|
| 1441 | int ret; | 
|---|
| 1442 | bool range_dirty; | 
|---|
| 1443 |  | 
|---|
| 1444 | /* | 
|---|
| 1445 | * Zero range can skip mappings that are zero on disk so long as | 
|---|
| 1446 | * pagecache is clean. If pagecache was dirty prior to zero range, the | 
|---|
| 1447 | * mapping converts on writeback completion and so must be zeroed. | 
|---|
| 1448 | * | 
|---|
| 1449 | * The simplest way to deal with this across a range is to flush | 
|---|
| 1450 | * pagecache and process the updated mappings. To avoid excessive | 
|---|
| 1451 | * flushing on partial eof zeroing, special case it to zero the | 
|---|
| 1452 | * unaligned start portion if already dirty in pagecache. | 
|---|
| 1453 | */ | 
|---|
| 1454 | if (off && | 
|---|
| 1455 | filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) { | 
|---|
| 1456 | iter.len = plen; | 
|---|
| 1457 | while ((ret = iomap_iter(&iter, ops)) > 0) | 
|---|
| 1458 | iter.status = iomap_zero_iter(&iter, did_zero, | 
|---|
| 1459 | write_ops); | 
|---|
| 1460 |  | 
|---|
| 1461 | iter.len = len - (iter.pos - pos); | 
|---|
| 1462 | if (ret || !iter.len) | 
|---|
| 1463 | return ret; | 
|---|
| 1464 | } | 
|---|
| 1465 |  | 
|---|
| 1466 | /* | 
|---|
| 1467 | * To avoid an unconditional flush, check pagecache state and only flush | 
|---|
| 1468 | * if dirty and the fs returns a mapping that might convert on | 
|---|
| 1469 | * writeback. | 
|---|
| 1470 | */ | 
|---|
| 1471 | range_dirty = filemap_range_needs_writeback(inode->i_mapping, | 
|---|
| 1472 | iter.pos, iter.pos + iter.len - 1); | 
|---|
| 1473 | while ((ret = iomap_iter(&iter, ops)) > 0) { | 
|---|
| 1474 | const struct iomap *srcmap = iomap_iter_srcmap(&iter); | 
|---|
| 1475 |  | 
|---|
| 1476 | if (srcmap->type == IOMAP_HOLE || | 
|---|
| 1477 | srcmap->type == IOMAP_UNWRITTEN) { | 
|---|
| 1478 | s64 status; | 
|---|
| 1479 |  | 
|---|
| 1480 | if (range_dirty) { | 
|---|
| 1481 | range_dirty = false; | 
|---|
| 1482 | status = iomap_zero_iter_flush_and_stale(&iter); | 
|---|
| 1483 | } else { | 
|---|
| 1484 | status = iomap_iter_advance_full(&iter); | 
|---|
| 1485 | } | 
|---|
| 1486 | iter.status = status; | 
|---|
| 1487 | continue; | 
|---|
| 1488 | } | 
|---|
| 1489 |  | 
|---|
| 1490 | iter.status = iomap_zero_iter(&iter, did_zero, write_ops); | 
|---|
| 1491 | } | 
|---|
| 1492 | return ret; | 
|---|
| 1493 | } | 
|---|
| 1494 | EXPORT_SYMBOL_GPL(iomap_zero_range); | 
|---|
| 1495 |  | 
|---|
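A typical caller looks something like the hole-punch edge zeroing sketched below; myfs_iomap_ops is an assumed iomap_ops instance, and passing NULL for write_ops and private assumes the defaults are acceptable for the filesystem.

```c
/*
 * Sketch: zero the partial blocks at the edges of a punched-out range
 * so stale data is not exposed once the full blocks in between are
 * freed.  myfs_iomap_ops is an assumed iomap_ops instance.
 */
static int myfs_zero_punch_edges(struct inode *inode, loff_t pos, loff_t len)
{
	bool did_zero = false;

	return iomap_zero_range(inode, pos, len, &did_zero,
			&myfs_iomap_ops, NULL, NULL);
}
```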
| 1496 | int | 
|---|
| 1497 | iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, | 
|---|
| 1498 | const struct iomap_ops *ops, | 
|---|
| 1499 | const struct iomap_write_ops *write_ops, void *private) | 
|---|
| 1500 | { | 
|---|
| 1501 | unsigned int blocksize = i_blocksize(inode); | 
|---|
| 1502 | unsigned int off = pos & (blocksize - 1); | 
|---|
| 1503 |  | 
|---|
| 1504 | /* Block boundary? Nothing to do */ | 
|---|
| 1505 | if (!off) | 
|---|
| 1506 | return 0; | 
|---|
| 1507 | return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops, | 
|---|
| 1508 | write_ops, private); | 
|---|
| 1509 | } | 
|---|
| 1510 | EXPORT_SYMBOL_GPL(iomap_truncate_page); | 
|---|
| 1511 |  | 
|---|
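A quick worked example of the arithmetic above: with a 4096-byte block size and pos == 10240 (truncating to 10 KiB), off = 10240 & 4095 = 2048, so iomap_zero_range() is asked to zero the 2048 bytes from offset 10240 up to the block boundary at 12288. A pos that is already block aligned returns immediately with nothing to do.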
| 1512 | static int iomap_folio_mkwrite_iter(struct iomap_iter *iter, | 
|---|
| 1513 | struct folio *folio) | 
|---|
| 1514 | { | 
|---|
| 1515 | loff_t length = iomap_length(iter); | 
|---|
| 1516 | int ret; | 
|---|
| 1517 |  | 
|---|
| 1518 | if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) { | 
|---|
| 1519 | ret = __block_write_begin_int(folio, iter->pos, length, NULL, | 
|---|
| 1520 | &iter->iomap); | 
|---|
| 1521 | if (ret) | 
|---|
| 1522 | return ret; | 
|---|
| 1523 | block_commit_write(folio, 0, length); | 
|---|
| 1524 | } else { | 
|---|
| 1525 | WARN_ON_ONCE(!folio_test_uptodate(folio)); | 
|---|
| 1526 | folio_mark_dirty(folio); | 
|---|
| 1527 | } | 
|---|
| 1528 |  | 
|---|
| 1529 | return iomap_iter_advance(iter, &length); | 
|---|
| 1530 | } | 
|---|
| 1531 |  | 
|---|
| 1532 | vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops, | 
|---|
| 1533 | void *private) | 
|---|
| 1534 | { | 
|---|
| 1535 | struct iomap_iter iter = { | 
|---|
| 1536 | .inode		= file_inode(vmf->vma->vm_file), | 
|---|
| 1537 | .flags		= IOMAP_WRITE | IOMAP_FAULT, | 
|---|
| 1538 | .private	= private, | 
|---|
| 1539 | }; | 
|---|
| 1540 | struct folio *folio = page_folio(vmf->page); | 
|---|
| 1541 | ssize_t ret; | 
|---|
| 1542 |  | 
|---|
| 1543 | folio_lock(folio); | 
|---|
| 1544 | ret = folio_mkwrite_check_truncate(folio, iter.inode); | 
|---|
| 1545 | if (ret < 0) | 
|---|
| 1546 | goto out_unlock; | 
|---|
| 1547 | iter.pos = folio_pos(folio); | 
|---|
| 1548 | iter.len = ret; | 
|---|
| 1549 | while ((ret = iomap_iter(&iter, ops)) > 0) | 
|---|
| 1550 | iter.status = iomap_folio_mkwrite_iter(&iter, folio); | 
|---|
| 1551 |  | 
|---|
| 1552 | if (ret < 0) | 
|---|
| 1553 | goto out_unlock; | 
|---|
| 1554 | folio_wait_stable(folio); | 
|---|
| 1555 | return VM_FAULT_LOCKED; | 
|---|
| 1556 | out_unlock: | 
|---|
| 1557 | folio_unlock(folio); | 
|---|
| 1558 | return vmf_fs_error(ret); | 
|---|
| 1559 | } | 
|---|
| 1560 | EXPORT_SYMBOL_GPL(iomap_page_mkwrite); | 
|---|
| 1561 |  | 
|---|
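Wiring this into a filesystem's mmap path is straightforward; the sketch below assumes a hypothetical myfs_iomap_ops instance. Real filesystems typically also bracket the fault with sb_start_pagefault()/sb_end_pagefault() and call file_update_time(), which is omitted here for brevity.

```c
/* Sketch: myfs_iomap_ops is assumed; freeze protection is omitted. */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	return iomap_page_mkwrite(vmf, &myfs_iomap_ops, NULL);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,
};
```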
| 1562 | void iomap_start_folio_write(struct inode *inode, struct folio *folio, | 
|---|
| 1563 | size_t len) | 
|---|
| 1564 | { | 
|---|
| 1565 | struct iomap_folio_state *ifs = folio->private; | 
|---|
| 1566 |  | 
|---|
| 1567 | WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs); | 
|---|
| 1568 | if (ifs) | 
|---|
| 1569 | atomic_add(len, &ifs->write_bytes_pending); | 
|---|
| 1570 | } | 
|---|
| 1571 | EXPORT_SYMBOL_GPL(iomap_start_folio_write); | 
|---|
| 1572 |  | 
|---|
| 1573 | void iomap_finish_folio_write(struct inode *inode, struct folio *folio, | 
|---|
| 1574 | size_t len) | 
|---|
| 1575 | { | 
|---|
| 1576 | struct iomap_folio_state *ifs = folio->private; | 
|---|
| 1577 |  | 
|---|
| 1578 | WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs); | 
|---|
| 1579 | WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0); | 
|---|
| 1580 |  | 
|---|
| 1581 | if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending)) | 
|---|
| 1582 | folio_end_writeback(folio); | 
|---|
| 1583 | } | 
|---|
| 1584 | EXPORT_SYMBOL_GPL(iomap_finish_folio_write); | 
|---|
| 1585 |  | 
|---|
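The two helpers above pair up around asynchronous I/O on sub-folio ranges: every byte accounted by iomap_start_folio_write() at submission must be matched by iomap_finish_folio_write() at completion, and the folio only ends writeback once the pending count drops to zero. A rough bio-based sketch of that pairing, with the myfs_* names hypothetical:

```c
/* Submission side: account the range before queuing it in the bio. */
static void myfs_stage_folio_range(struct bio *bio, struct inode *inode,
		struct folio *folio, size_t poff, size_t plen)
{
	iomap_start_folio_write(inode, folio, plen);
	bio_add_folio_nofail(bio, folio, plen, poff);
}

/* Completion side: drop the accounting for every folio in the bio. */
static void myfs_write_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_write(fi.folio->mapping->host, fi.folio,
				fi.length);
	bio_put(bio);
}
```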
| 1586 | static int iomap_writeback_range(struct iomap_writepage_ctx *wpc, | 
|---|
| 1587 | struct folio *folio, u64 pos, u32 rlen, u64 end_pos, | 
|---|
| 1588 | bool *wb_pending) | 
|---|
| 1589 | { | 
|---|
| 1590 | do { | 
|---|
| 1591 | ssize_t ret; | 
|---|
| 1592 |  | 
|---|
| 1593 | ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos); | 
|---|
| 1594 | if (WARN_ON_ONCE(ret == 0 || ret > rlen)) | 
|---|
| 1595 | return -EIO; | 
|---|
| 1596 | if (ret < 0) | 
|---|
| 1597 | return ret; | 
|---|
| 1598 | rlen -= ret; | 
|---|
| 1599 | pos += ret; | 
|---|
| 1600 |  | 
|---|
| 1601 | /* | 
|---|
| 1602 | * Holes are not written back by ->writeback_range, so track | 
|---|
| 1603 | * whether we handled anything that is not a hole here. | 
|---|
| 1604 | */ | 
|---|
| 1605 | if (wpc->iomap.type != IOMAP_HOLE) | 
|---|
| 1606 | *wb_pending = true; | 
|---|
| 1607 | } while (rlen); | 
|---|
| 1608 |  | 
|---|
| 1609 | return 0; | 
|---|
| 1610 | } | 
|---|
| 1611 |  | 
|---|
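For context, iomap_writeback_range() above loops because a single ->writeback_range call may make partial progress when the mapping ends before the dirty range does. A hypothetical implementation honouring that contract might look like the following; myfs_map_blocks() and myfs_stage_range() are placeholders, not real kernel APIs.

```c
static ssize_t myfs_writeback_range(struct iomap_writepage_ctx *wpc,
		struct folio *folio, u64 pos, unsigned int len, u64 end_pos)
{
	int error;

	/* Fill wpc->iomap with the extent covering @pos (placeholder). */
	error = myfs_map_blocks(wpc, pos, len);
	if (error)
		return error;

	/* Only handle the part of the range this mapping covers. */
	len = min_t(u64, len, wpc->iomap.offset + wpc->iomap.length - pos);

	/* Holes are reported as handled without submitting any I/O. */
	if (wpc->iomap.type != IOMAP_HOLE)
		myfs_stage_range(wpc, folio, pos, len, end_pos);

	/* Never zero and never more than the length passed in. */
	return len;
}
```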
| 1612 | /* | 
|---|
| 1613 | * Check interaction of the folio with the file end. | 
|---|
| 1614 | * | 
|---|
| 1615 | * If the folio is entirely beyond i_size, return false.  If it straddles | 
|---|
| 1616 | * i_size, adjust end_pos and zero all data beyond i_size. | 
|---|
| 1617 | */ | 
|---|
| 1618 | static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode, | 
|---|
| 1619 | u64 *end_pos) | 
|---|
| 1620 | { | 
|---|
| 1621 | u64 isize = i_size_read(inode); | 
|---|
| 1622 |  | 
|---|
| 1623 | if (*end_pos > isize) { | 
|---|
| 1624 | size_t poff = offset_in_folio(folio, isize); | 
|---|
| 1625 | pgoff_t end_index = isize >> PAGE_SHIFT; | 
|---|
| 1626 |  | 
|---|
| 1627 | /* | 
|---|
| 1628 | * If the folio is entirely outside of i_size, skip it. | 
|---|
| 1629 | * | 
|---|
| 1630 | * This can happen due to a truncate operation that is in | 
|---|
| 1631 | * progress and in that case truncate will finish it off once | 
|---|
| 1632 | * we've dropped the folio lock. | 
|---|
| 1633 | * | 
|---|
| 1634 | * Note that the pgoff_t used for end_index is an unsigned long. | 
|---|
| 1635 | * If the given offset is greater than 16TB on a 32-bit system, | 
|---|
| 1636 | * then if we checked if the folio is fully outside i_size with | 
|---|
| 1637 | * "if (folio->index >= end_index + 1)", "end_index + 1" would | 
|---|
| 1638 | * overflow and evaluate to 0.  Hence this folio would be | 
|---|
| 1639 | * redirtied and written out repeatedly, which would result in | 
|---|
| 1640 | * an infinite loop; the user program performing this operation | 
|---|
| 1641 | * would hang.  Instead, we can detect this situation by | 
|---|
| 1642 | * checking if the folio is totally beyond i_size or if its | 
|---|
| 1643 | * offset is just equal to the EOF. | 
|---|
| 1644 | */ | 
|---|
| 1645 | if (folio->index > end_index || | 
|---|
| 1646 | (folio->index == end_index && poff == 0)) | 
|---|
| 1647 | return false; | 
|---|
| 1648 |  | 
|---|
| 1649 | /* | 
|---|
| 1650 | * The folio straddles i_size. | 
|---|
| 1651 | * | 
|---|
| 1652 | * It must be zeroed out on each and every writepage invocation | 
|---|
| 1653 | * because it may be mmapped: | 
|---|
| 1654 | * | 
|---|
| 1655 | *    A file is mapped in multiples of the page size.  For a | 
|---|
| 1656 | *    file that is not a multiple of the page size, the | 
|---|
| 1657 | *    remaining memory is zeroed when mapped, and writes to that | 
|---|
| 1658 | *    region are not written out to the file. | 
|---|
| 1659 | * | 
|---|
| 1660 | * Also adjust the end_pos to the end of file and skip writeback | 
|---|
| 1661 | * for all blocks entirely beyond i_size. | 
|---|
| 1662 | */ | 
|---|
| 1663 | folio_zero_segment(folio, poff, folio_size(folio)); | 
|---|
| 1664 | *end_pos = isize; | 
|---|
| 1665 | } | 
|---|
| 1666 |  | 
|---|
| 1667 | return true; | 
|---|
| 1668 | } | 
|---|
| 1669 |  | 
|---|
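To put numbers on the overflow note in the comment above: on a 32-bit system with 4 KiB pages, pgoff_t holds at most 0xFFFFFFFF, i.e. offsets below 16 TiB. With i_size at, say, 16 TiB minus one byte, end_index is 0xFFFFFFFF, and the naive "folio->index >= end_index + 1" check computes 0xFFFFFFFF + 1 == 0, so every folio would compare as beyond EOF. The two-part check used here avoids that wraparound entirely.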
| 1670 | int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio) | 
|---|
| 1671 | { | 
|---|
| 1672 | struct iomap_folio_state *ifs = folio->private; | 
|---|
| 1673 | struct inode *inode = wpc->inode; | 
|---|
| 1674 | u64 pos = folio_pos(folio); | 
|---|
| 1675 | u64 end_pos = pos + folio_size(folio); | 
|---|
| 1676 | u64 end_aligned = 0; | 
|---|
| 1677 | bool wb_pending = false; | 
|---|
| 1678 | int error = 0; | 
|---|
| 1679 | u32 rlen; | 
|---|
| 1680 |  | 
|---|
| 1681 | WARN_ON_ONCE(!folio_test_locked(folio)); | 
|---|
| 1682 | WARN_ON_ONCE(folio_test_dirty(folio)); | 
|---|
| 1683 | WARN_ON_ONCE(folio_test_writeback(folio)); | 
|---|
| 1684 |  | 
|---|
| 1685 | trace_iomap_writeback_folio(inode, pos, folio_size(folio)); | 
|---|
| 1686 |  | 
|---|
| 1687 | if (!iomap_writeback_handle_eof(folio, inode, &end_pos)) | 
|---|
| 1688 | return 0; | 
|---|
| 1689 | WARN_ON_ONCE(end_pos <= pos); | 
|---|
| 1690 |  | 
|---|
| 1691 | if (i_blocks_per_folio(inode, folio) > 1) { | 
|---|
| 1692 | if (!ifs) { | 
|---|
| 1693 | ifs = ifs_alloc(inode, folio, 0); | 
|---|
| 1694 | iomap_set_range_dirty(folio, 0, end_pos - pos); | 
|---|
| 1695 | } | 
|---|
| 1696 |  | 
|---|
| 1697 | /* | 
|---|
| 1698 | * Keep the I/O completion handler from clearing the writeback | 
|---|
| 1699 | * bit until we have submitted all blocks by adding a bias to | 
|---|
| 1700 | * ifs->write_bytes_pending, which is dropped after submitting | 
|---|
| 1701 | * all blocks. | 
|---|
| 1702 | */ | 
|---|
| 1703 | WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0); | 
|---|
| 1704 | iomap_start_folio_write(inode, folio, 1); | 
|---|
| 1705 | } | 
|---|
| 1706 |  | 
|---|
| 1707 | /* | 
|---|
| 1708 | * Set the writeback bit ASAP, as the I/O completion for the single | 
|---|
| 1709 | * block per folio case can happen as soon as we submit the bio. | 
|---|
| 1710 | */ | 
|---|
| 1711 | folio_start_writeback(folio); | 
|---|
| 1712 |  | 
|---|
| 1713 | /* | 
|---|
| 1714 | * Walk through the folio to find dirty areas to write back. | 
|---|
| 1715 | */ | 
|---|
| 1716 | end_aligned = round_up(end_pos, i_blocksize(inode)); | 
|---|
| 1717 | while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) { | 
|---|
| 1718 | error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos, | 
|---|
| 1719 | &wb_pending); | 
|---|
| 1720 | if (error) | 
|---|
| 1721 | break; | 
|---|
| 1722 | pos += rlen; | 
|---|
| 1723 | } | 
|---|
| 1724 |  | 
|---|
| 1725 | if (wb_pending) | 
|---|
| 1726 | wpc->nr_folios++; | 
|---|
| 1727 |  | 
|---|
| 1728 | /* | 
|---|
| 1729 | * We can have dirty bits set past end of file in page_mkwrite path | 
|---|
| 1730 | * while mapping the last partial folio. Hence it's better to clear | 
|---|
| 1731 | * all the dirty bits in the folio here. | 
|---|
| 1732 | */ | 
|---|
| 1733 | iomap_clear_range_dirty(folio, 0, folio_size(folio)); | 
|---|
| 1734 |  | 
|---|
| 1735 | /* | 
|---|
| 1736 | * Usually the writeback bit is cleared by the I/O completion handler. | 
|---|
| 1737 | * But we may end up either not actually writing any blocks, or (when | 
|---|
| 1738 | * there are multiple blocks in a folio) all I/O might have finished | 
|---|
| 1739 | * already at this point.  In that case we need to clear the writeback | 
|---|
| 1740 | * bit ourselves right after unlocking the page. | 
|---|
| 1741 | */ | 
|---|
| 1742 | if (ifs) { | 
|---|
| 1743 | if (atomic_dec_and_test(&ifs->write_bytes_pending)) | 
|---|
| 1744 | folio_end_writeback(folio); | 
|---|
| 1745 | } else { | 
|---|
| 1746 | if (!wb_pending) | 
|---|
| 1747 | folio_end_writeback(folio); | 
|---|
| 1748 | } | 
|---|
| 1749 | mapping_set_error(inode->i_mapping, error); | 
|---|
| 1750 | return error; | 
|---|
| 1751 | } | 
|---|
| 1752 | EXPORT_SYMBOL_GPL(iomap_writeback_folio); | 
|---|
| 1753 |  | 
|---|
| 1754 | int | 
|---|
| 1755 | iomap_writepages(struct iomap_writepage_ctx *wpc) | 
|---|
| 1756 | { | 
|---|
| 1757 | struct address_space *mapping = wpc->inode->i_mapping; | 
|---|
| 1758 | struct folio *folio = NULL; | 
|---|
| 1759 | int error; | 
|---|
| 1760 |  | 
|---|
| 1761 | /* | 
|---|
| 1762 | * Writeback from reclaim context should never happen except in the case | 
|---|
| 1763 | * of a VM regression so warn about it and refuse to write the data. | 
|---|
| 1764 | */ | 
|---|
| 1765 | if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) == | 
|---|
| 1766 | PF_MEMALLOC)) | 
|---|
| 1767 | return -EIO; | 
|---|
| 1768 |  | 
|---|
| 1769 | while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) { | 
|---|
| 1770 | error = iomap_writeback_folio(wpc, folio); | 
|---|
| 1771 | folio_unlock(folio); | 
|---|
| 1772 | } | 
|---|
| 1773 |  | 
|---|
| 1774 | /* | 
|---|
| 1775 | * If @error is non-zero, it means that we have a situation where some | 
|---|
| 1776 | * part of the submission process has failed after we've marked pages | 
|---|
| 1777 | * for writeback. | 
|---|
| 1778 | * | 
|---|
| 1779 | * We cannot cancel the writeback directly in that case, so always call | 
|---|
| 1780 | * ->writeback_submit to run the I/O completion handler to clear the | 
|---|
| 1781 | * writeback bit and let the file system process the errors. | 
|---|
| 1782 | */ | 
|---|
| 1783 | if (wpc->wb_ctx) | 
|---|
| 1784 | return wpc->ops->writeback_submit(wpc, error); | 
|---|
| 1785 | return error; | 
|---|
| 1786 | } | 
|---|
| 1787 | EXPORT_SYMBOL_GPL(iomap_writepages); | 
|---|
| 1788 |  | 
|---|
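Finally, a filesystem plugs this in as its ->writepages method by filling an iomap_writepage_ctx and delegating, roughly as sketched below; myfs_writeback_ops is an assumed iomap_writeback_ops instance providing ->writeback_range and ->writeback_submit.

```c
/* Sketch: myfs_writeback_ops is assumed to exist. */
static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode	= mapping->host,
		.wbc	= wbc,
		.ops	= &myfs_writeback_ops,
	};

	return iomap_writepages(&wpc);
}
```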