// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

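/* Create the slab caches for the io_end and io_end_vec structures. */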
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

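/* Allocate a new io_end_vec and queue it on the io_end's list of extents. */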
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

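/* Free all io_end_vec structures queued on an io_end. */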
static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

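/* Return the most recently added io_end_vec; the list must not be empty. */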
struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c.  This provides
 * compatibility with dmesg scrapers that look for a specific buffer
 * I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

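/*
 * Finish I/O on the folios covered by @bio: clear the async_write flag on
 * each buffer this bio wrote and, once no buffer in a folio remains under
 * I/O, end writeback on the folio (freeing the fscrypt bounce page, if any).
 */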
static void ext4_finish_bio(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		struct folio *io_folio = NULL;
		struct buffer_head *bh, *head;
		size_t bio_start = fi.offset;
		size_t bio_end = bio_start + fi.length;
		unsigned under_io = 0;
		unsigned long flags;

		if (fscrypt_is_bounce_folio(folio)) {
			io_folio = folio;
			folio = fscrypt_pagecache_folio(folio);
		}

		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);
			mapping_set_error(folio->mapping, err);
		}
		bh = head = folio_buffers(folio);
		/*
		 * We check all buffers in the folio under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status) {
				set_buffer_write_io_error(bh);
				buffer_io_error(bh);
			}
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(&io_folio->page);
			folio_end_writeback(folio);
		}
	}
}

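/*
 * Free an io_end and everything attached to it: finish and drop all bios
 * linked off it and release its io_end_vecs.
 */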
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * On successful IO, check a range of space and convert unwritten extents to
 * written. On IO failure, check if journal abort is needed. Note that
 * we are protected from truncate touching the same part of the extent tree
 * by the fact that truncate code waits for all DIO to finish (thus exclusion
 * from direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	struct super_block *sb = inode->i_sb;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	/*
	 * Do not convert the unwritten extents if data writeback fails,
	 * or stale data may be exposed.
	 */
	io_end->handle = NULL;	/* Following call will use up the handle */
	if (unlikely(io_end->flag & EXT4_IO_END_FAILED)) {
		ret = -EIO;
		if (handle)
			jbd2_journal_free_reserved(handle);

		if (test_opt(sb, DATA_ERR_ABORT))
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, ret);
	} else {
		ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	}
	if (ret < 0 && !ext4_emergency_state(sb) &&
	    io_end->flag & EXT4_IO_END_UNWRITTEN) {
		ext4_msg(sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}

	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

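/* With EXT4FS_DEBUG enabled, dump an inode's list of completed io_ends. */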
static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

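/*
 * Does this io_end require completion from process context, i.e. does it
 * have unwritten extents to convert or an I/O failure that may require a
 * journal abort?
 */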
static bool ext4_io_end_defer_completion(ext4_io_end_t *io_end)
{
	if (io_end->flag & EXT4_IO_END_UNWRITTEN &&
	    !list_empty(&io_end->list_vec))
		return true;
	if (test_opt(io_end->inode->i_sb, DATA_ERR_ABORT) &&
	    io_end->flag & EXT4_IO_END_FAILED &&
	    !ext4_emergency_state(io_end->inode->i_sb))
		return true;
	return false;
}

/* Add the io_end to the per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions or pending IO errors will enter here. */
	WARN_ON(!(io_end->flag & EXT4_IO_END_DEFER_COMPLETION));
	WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN &&
		!io_end->handle && sbi->s_journal);
	WARN_ON(!io_end->bio);

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

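/*
 * Splice the list of completed io_ends onto a local list and process each
 * entry, returning the first error encountered.
 */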
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_DEFER_COMPLETION));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Used to convert unwritten extents to written extents upon IO completion,
 * or used to abort the journal upon IO errors.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

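/* Allocate and initialize an io_end structure holding a single reference. */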
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		refcount_set(&io_end->count, 1);
	}
	return io_end;
}

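/*
 * Drop a reference to an io_end. If this was the last reference and the
 * io_end needs deferred completion, queue it for the reserved-conversion
 * workqueue; otherwise release it immediately.
 */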
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (ext4_io_end_defer_completion(io_end))
			return ext4_add_complete_io(io_end);

		ext4_release_io_end(io_end);
	}
}

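/*
 * Drop a reference to an io_end, performing any deferred completion work
 * synchronously if this was the last reference.
 */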
int ext4_put_io_end(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (ext4_io_end_defer_completion(io_end))
			return ext4_end_io_end(io_end);

		ext4_release_io_end(io_end);
	}
	return 0;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	refcount_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
		      bio->bi_bdev,
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		io_end->flag |= EXT4_IO_END_FAILED;
		mapping_set_error(inode->i_mapping,
				  blk_status_to_errno(bio->bi_status));
	}

	if (ext4_io_end_defer_completion(io_end)) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

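/* Submit the bio accumulated in @io, if any, and reset the submission state. */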
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
			io->io_bio->bi_opf |= REQ_SYNC;
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

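/*
 * Start a new bio at the block @bh belongs to and make it the current bio
 * of the submission context @io.
 */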
static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

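/*
 * Add @bh to the bio under construction, submitting the current bio first
 * if @bh is not contiguous with it or cannot share its encryption context.
 */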
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct folio *folio,
			     struct folio *io_folio,
			     struct buffer_head *bh)
{
	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		io_submit_init_bio(io, bh);
		io->io_bio->bi_write_hint = inode->i_write_hint;
	}
	if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, folio, bh->b_size);
	io->io_next_block++;
}

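/*
 * Write out the dirty, mapped buffers covering the first @len bytes of
 * @folio: mark them async_write, start writeback on the folio (encrypting
 * into a bounce page when fs-layer crypto is used), and add each buffer to
 * the bio being built in @io.
 */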
int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
			 size_t len)
{
	struct folio *io_folio = folio;
	struct inode *inode = folio->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_to_submit = 0;
	struct writeback_control *wbc = io->io_wbc;
	bool keep_towrite = false;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));

	/*
	 * Comments copied from block_write_full_folio:
	 *
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < folio_size(folio))
		folio_zero_segment(folio, len, folio_size(folio));
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the folio before submitting so that
	 * folio_end_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = folio_buffers(folio);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			/*
			 * Keeping dirty a buffer we cannot write? Make sure
			 * to redirty the folio and keep the TOWRITE tag so
			 * that racing WB_SYNC_ALL writeback does not skip the
			 * folio. This happens e.g. when doing writeout for
			 * transaction commit or when journalled data is not
			 * yet committed.
			 */
			if (buffer_dirty(bh) ||
			    (buffer_jbd(bh) && buffer_jbddirty(bh))) {
				if (!folio_test_dirty(folio))
					folio_redirty_for_writepage(wbc, folio);
				keep_towrite = true;
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		clear_buffer_dirty(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	/* Nothing to submit? Just unlock the folio... */
	if (!nr_to_submit)
		return 0;

	bh = head = folio_buffers(folio);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page.  For simplicity, just encrypt until the last
	 * block which might be needed.  This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
		struct page *bounce_page;

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio.  Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT;
	retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
					enc_bytes, 0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_t new_gfp_flags = GFP_NOFS;
				if (io->io_bio)
					ext4_io_submit(io);
				else
					new_gfp_flags |= __GFP_NOFAIL;
				memalloc_retry_wait(gfp_flags);
				gfp_flags = new_gfp_flags;
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			folio_redirty_for_writepage(wbc, folio);
			do {
				if (buffer_async_write(bh)) {
					clear_buffer_async_write(bh);
					set_buffer_dirty(bh);
				}
				bh = bh->b_this_page;
			} while (bh != head);

			return ret;
		}
		io_folio = page_folio(bounce_page);
	}

	__folio_start_writeback(folio, keep_towrite);

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode, folio, io_folio, bh);
	} while ((bh = bh->b_this_page) != head);

	return 0;
}