// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>
#include <linux/filelock.h>

#include "internal.h"
#include "pnfs.h"
#include "nfstrace.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

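/*
 * struct nfs_page_iter_page - iterator over the pages backing an nfs_page
 *
 * Walks the byte range [wb_pgbase, wb_pgbase + wb_bytes) of a request,
 * yielding one struct page at a time until the request is exhausted.
 */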
struct nfs_page_iter_page {
	const struct nfs_page *req;
	size_t count;
};

static void nfs_page_iter_page_init(struct nfs_page_iter_page *i,
				    const struct nfs_page *req)
{
	i->req = req;
	i->count = 0;
}

static void nfs_page_iter_page_advance(struct nfs_page_iter_page *i, size_t sz)
{
	const struct nfs_page *req = i->req;
	size_t tmp = i->count + sz;

	i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
}

static struct page *nfs_page_iter_page_get(struct nfs_page_iter_page *i)
{
	const struct nfs_page *req = i->req;
	struct page *page;

	if (i->count != req->wb_bytes) {
		size_t base = i->count + req->wb_pgbase;
		size_t len = PAGE_SIZE - offset_in_page(base);

		page = nfs_page_to_page(req, base);
		nfs_page_iter_page_advance(i, len);
		return page;
	}
	return NULL;
}

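/*
 * Return the mirror at @idx. Layout drivers that support multiple
 * mirrors supply a pg_get_mirror method; everyone else has exactly
 * one mirror, so fall back to pg_mirrors[0].
 */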
static struct nfs_pgio_mirror *
nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
	if (desc->pg_ops->pg_get_mirror)
		return desc->pg_ops->pg_get_mirror(desc, idx);
	return &desc->pg_mirrors[0];
}

struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx);
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

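/*
 * Select mirror @idx as the current mirror and return the previously
 * selected index. As above, drivers without a pg_set_mirror method
 * only ever use mirror 0, so the index is returned unchanged.
 */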
static u32
nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
{
	if (desc->pg_ops->pg_set_mirror)
		return desc->pg_ops->pg_set_mirror(desc, idx);
	return desc->pg_mirror_idx;
}

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = nfs_req_openctx(hdr->req)->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->io_completion = desc->pg_io_completion;
	hdr->dreq = desc->pg_dreq;
	nfs_netfs_set_pgio_header(hdr, desc);
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);

	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	unsigned int new = pos - hdr->io_start;

	trace_nfs_pgio_error(hdr, error, pos);
	if (hdr->good_bytes > new) {
		hdr->good_bytes = new;
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
			hdr->error = error;
	}
}

static inline struct nfs_page *nfs_page_alloc(void)
{
	struct nfs_page *p =
		kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask());
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}

/**
 * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O
 * to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_counter to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
	struct inode *inode = d_inode(l_ctx->open_context->dentry);
	bool ret = false;

	if (atomic_read(&l_ctx->io_count) > 0) {
		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
		ret = true;
	}

	if (atomic_read(&l_ctx->io_count) == 0) {
		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
		ret = false;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);

/*
 * nfs_page_set_headlock - set the request PG_HEADLOCK
 * @req: request that is to be locked
 *
 * this lock must be held when modifying req->wb_head
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_set_headlock(struct nfs_page *req)
{
	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
		return 0;

	set_bit(PG_CONTENDED1, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_clear_headlock - clear the request PG_HEADLOCK
 * @req: request that is to be unlocked
 */
void
nfs_page_clear_headlock(struct nfs_page *req)
{
	clear_bit_unlock(PG_HEADLOCK, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
}

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req: request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
	int ret;

	ret = nfs_page_set_headlock(req);
	if (ret || req->wb_head == req)
		return ret;
	return nfs_page_set_headlock(req->wb_head);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req: request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	if (req != req->wb_head)
		nfs_page_clear_headlock(req->wb_head);
	nfs_page_clear_headlock(req);
}

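/*
 * A minimal sketch of the traversal pattern the group lock implies
 * (illustrative only; error handling elided):
 *
 *	if (nfs_page_group_lock(req) == 0) {
 *		struct nfs_page *tmp = req->wb_this_page;
 *
 *		while (tmp != req) {
 *			... inspect or modify tmp under the group lock ...
 *			tmp = tmp->wb_this_page;
 *		}
 *		nfs_page_group_unlock(req);
 *	}
 */
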
/**
 * nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set
 * @req: request in page group
 * @bit: PG_* bit that is used to sync page group
 *
 * must be called with page group lock held
 */
bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *   or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = nfs_page_to_inode(req);
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			atomic_long_inc(&NFS_I(inode)->nrequests);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		goto out;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
out:
	/* subrequests must release the ref on the head request */
	if (head != req)
		nfs_release_request(head);
}

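/*
 * Allocate and initialize an nfs_page covering @count bytes at @offset
 * within the page at @index (page offset @pgbase), holding a reference
 * on @l_ctx for the lifetime of the request. Returns an ERR_PTR on
 * failure.
 */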
static struct nfs_page *nfs_page_create(struct nfs_lock_context *l_ctx,
					unsigned int pgbase, pgoff_t index,
					unsigned int offset, unsigned int count)
{
	struct nfs_page *req;
	struct nfs_open_context *ctx = l_ctx->open_context;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	req->wb_lock_context = l_ctx;
	refcount_inc(&l_ctx->count);
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_pgbase = pgbase;
	req->wb_index = index;
	req->wb_offset = offset;
	req->wb_bytes = count;
	kref_init(&req->wb_kref);
	req->wb_nio = 0;
	return req;
}

static void nfs_page_assign_folio(struct nfs_page *req, struct folio *folio)
{
	if (folio != NULL) {
		req->wb_folio = folio;
		folio_get(folio);
		set_bit(PG_FOLIO, &req->wb_flags);
	}
}

static void nfs_page_assign_page(struct nfs_page *req, struct page *page)
{
	if (page != NULL) {
		req->wb_page = page;
		get_page(page);
	}
}

/**
 * nfs_page_create_from_page - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @pgbase: starting offset within the page for the write
 * @offset: file offset for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
					   struct page *page,
					   unsigned int pgbase, loff_t offset,
					   unsigned int count)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	struct nfs_page *ret;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	ret = nfs_page_create(l_ctx, pgbase, offset >> PAGE_SHIFT,
			      offset_in_page(offset), count);
	if (!IS_ERR(ret)) {
		nfs_page_assign_page(ret, page);
		nfs_page_group_init(ret, NULL);
	}
	nfs_put_lock_context(l_ctx);
	return ret;
}

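/*
 * Illustrative only: a caller with a locked page would typically build
 * and queue a request roughly as follows (descriptor setup and error
 * handling elided, surrounding variable names assumed):
 *
 *	req = nfs_page_create_from_page(ctx, page, pgbase, offset, count);
 *	if (!IS_ERR(req))
 *		nfs_pageio_add_request(&pgio, req);
 */
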
/**
 * nfs_page_create_from_folio - Create an NFS read/write request.
 * @ctx: open context to use
 * @folio: folio to write
 * @offset: starting offset within the folio for the write
 * @count: number of bytes to read/write
 *
 * The folio must be locked by the caller. This makes sure we never
 * create two different requests for the same folio.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
					    struct folio *folio,
					    unsigned int offset,
					    unsigned int count)
{
	struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
	struct nfs_page *ret;

	if (IS_ERR(l_ctx))
		return ERR_CAST(l_ctx);
	ret = nfs_page_create(l_ctx, offset, folio->index, offset, count);
	if (!IS_ERR(ret)) {
		nfs_page_assign_folio(ret, folio);
		nfs_page_group_init(ret, NULL);
	}
	nfs_put_lock_context(l_ctx);
	return ret;
}

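/*
 * Create a subrequest that mirrors part of @req, covering @count bytes
 * at @offset (page offset @pgbase). The new request is locked and
 * linked into @req's page group as the last member.
 */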
static struct nfs_page *
nfs_create_subreq(struct nfs_page *req,
		  unsigned int pgbase,
		  unsigned int offset,
		  unsigned int count)
{
	struct nfs_page *last;
	struct nfs_page *ret;
	struct folio *folio = nfs_page_to_folio(req);
	struct page *page = nfs_page_to_page(req, pgbase);

	ret = nfs_page_create(req->wb_lock_context, pgbase, req->wb_index,
			      offset, count);
	if (!IS_ERR(ret)) {
		if (folio)
			nfs_page_assign_folio(ret, folio);
		else
			nfs_page_assign_page(ret, page);
		/* find the last request */
		for (last = req->wb_head;
		     last->wb_this_page != req->wb_head;
		     last = last->wb_this_page)
			;

		nfs_lock_request(ret);
		nfs_page_group_init(ret, last);
		ret->wb_nio = req->wb_nio;
	}
	return ret;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
	clear_bit_unlock(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clean up
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);
	struct page *page = req->wb_page;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;
	struct nfs_open_context *ctx;

	if (folio != NULL) {
		folio_put(folio);
		req->wb_folio = NULL;
		clear_bit(PG_FOLIO, &req->wb_flags);
	} else if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count)) {
			wake_up_var(&l_ctx->io_count);
			ctx = l_ctx->open_context;
			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
		}
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
}

/**
 * nfs_free_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator. With 4K pages and
	 * 8-byte page pointers, for example, this caps an RPC at 512 pages.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
	    sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 * @hdr: A header that has had nfs_generic_pgio called
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @pgbase: base offset into the first page of the page array
 * @count: Number of bytes to read or write
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, unsigned int pgbase,
			      unsigned int count, int how,
			      struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req);
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = pgbase;
	hdr->args.pages = hdr->page_array.pagevec;
	hdr->args.count = count;
	hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
		fallthrough;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr = &hdr->fattr;
	hdr->res.count = 0;
	hdr->res.eof = 0;
	hdr->res.verf = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;
	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      const struct cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags,
		      struct nfsd_file *localio)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};

	if (nfs_server_capable(hdr->inode, NFS_CAP_MOVEABLE))
		task_setup_data.flags |= RPC_TASK_MOVEABLE;

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	if (localio)
		return nfs_local_doio(NFS_SERVER(hdr->inode)->nfs_client,
				      localio, hdr, call_ops);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	hdr->completion_ops->completion(hdr);
}

static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_io_completion = NULL;
	desc->pg_dreq = NULL;
	nfs_netfs_reset_pageio_descriptor(desc);
	desc->pg_bsize = bsize;

	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;

	desc->pg_mirrors_dynamic = NULL;
	desc->pg_mirrors = desc->pg_mirrors_static;
	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
	desc->pg_maxretrans = 0;
}

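/*
 * A rough sketch of a pageio descriptor's lifecycle as the read and
 * write paths drive it (illustrative only; the pg_ops/compl_ops/rw_ops
 * tables and error handling are elided):
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, rw_ops,
 *			bsize, io_flags);
 *	... nfs_pageio_add_request(&pgio, req) for each request ...
 *	nfs_pageio_complete(&pgio);
 */
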
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_page *req;
	struct page **pages,
		     *last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	struct nfs_page_array *pg_array = &hdr->page_array;
	unsigned int pagecount, pageused;
	unsigned int pg_base = offset_in_page(mirror->pg_base);
	gfp_t gfp_flags = nfs_io_gfp_mask();

	pagecount = nfs_page_array_len(pg_base, mirror->pg_count);
	pg_array->npages = pagecount;

	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
		pg_array->pagevec = pg_array->page_array;
	else {
		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!pg_array->pagevec) {
			pg_array->npages = 0;
			nfs_pgio_error(hdr);
			desc->pg_error = -ENOMEM;
			return desc->pg_error;
		}
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		struct nfs_page_iter_page i;
		struct page *page;

		req = nfs_list_entry(head->next);
		nfs_list_move_request(req, &hdr->pages);

		if (req->wb_pgbase == 0)
			last_page = NULL;

		nfs_page_iter_page_init(&i, req);
		while ((page = nfs_page_iter_page_get(&i)) != NULL) {
			if (last_page != page) {
				pageused++;
				if (pageused > pagecount)
					goto full;
				*pages++ = last_page = page;
			}
		}
	}
full:
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, pg_base, mirror->pg_count, desc->pg_ioflags,
			  &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

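/*
 * Default pg_doio implementation for reads and writes: allocate a pageio
 * header for the current mirror, fill it via nfs_generic_pgio(), then
 * fire off the RPC (or local I/O) with nfs_initiate_pgio().
 */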
static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;
	unsigned short task_flags = 0;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0) {
		struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client;

		struct nfsd_file *localio =
			nfs_local_open_fh(clp, hdr->cred, hdr->args.fh,
					  &hdr->args.context->nfl,
					  hdr->args.context->mode);

		if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion)
			task_flags = RPC_TASK_MOVEABLE;
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags,
					RPC_TASK_CRED_NOREF | task_flags,
					localio);
	}
	return ret;
}

static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
			 unsigned int mirror_count)
{
	struct nfs_pgio_mirror *ret;
	unsigned int i;

	kfree(desc->pg_mirrors_dynamic);
	desc->pg_mirrors_dynamic = NULL;
	if (mirror_count == 1)
		return desc->pg_mirrors_static;
	ret = kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask());
	if (ret != NULL) {
		for (i = 0; i < mirror_count; i++)
			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
		desc->pg_mirrors_dynamic = ret;
	}
	return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				       struct nfs_page *req)
{
	unsigned int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
		return;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
		pgio->pg_error = -EINVAL;
		return;
	}

	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
	if (pgio->pg_mirrors == NULL) {
		pgio->pg_error = -ENOMEM;
		pgio->pg_mirrors = pgio->pg_mirrors_static;
		mirror_count = 1;
	}
	pgio->pg_mirror_count = mirror_count;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
				   const struct nfs_lock_context *l2)
{
	return l1->lockowner == l2->lockowner;
}

static bool nfs_page_is_contiguous(const struct nfs_page *prev,
				   const struct nfs_page *req)
{
	size_t prev_end = prev->wb_pgbase + prev->wb_bytes;

	if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
		return false;
	if (req->wb_pgbase == 0)
		return prev_end == nfs_page_max_length(prev);
	if (req->wb_pgbase == prev_end) {
		struct folio *folio = nfs_page_to_folio(req);
		if (folio)
			return folio == nfs_page_to_folio(prev);
		return req->wb_page == prev->wb_page;
	}
	return false;
}

/**
 * nfs_coalesce_size - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Returns size of the request that can be coalesced
 */
static unsigned int nfs_coalesce_size(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
			return 0;
		flctx = locks_inode_context(d_inode(nfs_req_openctx(req)->dentry));
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return 0;
		if (!nfs_page_is_contiguous(prev, req))
			return 0;
	}
	return pgio->pg_ops->pg_test(pgio, prev, req);
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * If the request 'req' was successfully coalesced into the existing list
 * of pages 'desc', it returns the size of req.
 */
static unsigned int
nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
			  struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *prev = NULL;
	unsigned int size;

	if (list_empty(&mirror->pg_list)) {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
		mirror->pg_count = 0;
		mirror->pg_recoalesce = 0;
	} else
		prev = nfs_list_entry(mirror->pg_list.prev);

	if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
		if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
			desc->pg_error = -ETIMEDOUT;
		else
			desc->pg_error = -EIO;
		return 0;
	}

	size = nfs_coalesce_size(prev, req, desc);
	if (size < req->wb_bytes)
		return size;
	nfs_list_move_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return req->wb_bytes;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		if (list_empty(&mirror->pg_list))
			mirror->pg_bytes_written += mirror->pg_count;
	}
}

static void
nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	LIST_HEAD(head);

	nfs_list_move_request(req, &head);
	desc->pg_completion_ops->error_cleanup(&head, desc->pg_error);
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group. If so, it will submit @req as the last one, to ensure
 * the pointer to @req is still valid in case of failure.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *subreq;
	unsigned int size, subreq_size;

	nfs_page_group_lock(req);

	subreq = req;
	subreq_size = subreq->wb_bytes;
	for (;;) {
		size = nfs_pageio_do_add_request(desc, subreq);
		if (size == subreq_size) {
			/* We successfully submitted a request */
			if (subreq == req)
				break;
			req->wb_pgbase += size;
			req->wb_bytes -= size;
			req->wb_offset += size;
			subreq_size = req->wb_bytes;
			subreq = req;
			continue;
		}
		if (WARN_ON_ONCE(subreq != req)) {
			nfs_page_group_unlock(req);
			nfs_pageio_cleanup_request(desc, subreq);
			subreq = req;
			subreq_size = req->wb_bytes;
			nfs_page_group_lock(req);
		}
		if (!size) {
			/* Can't coalesce any more, so do I/O */
			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0 || mirror->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}
		subreq = nfs_create_subreq(req, req->wb_pgbase,
					   req->wb_offset, size);
		if (IS_ERR(subreq))
			goto err_ptr;
		subreq_size = size;
	}

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}

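/*
 * Re-add any requests that were left on the mirror's pg_list after an
 * incomplete flush. Returns 0 if the descriptor hit an error (with
 * pg_recoalesce set and the remaining requests put back on pg_list),
 * 1 otherwise.
 */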
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
					 struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
{
	u32 midx;
	struct nfs_pgio_mirror *mirror;

	if (!desc->pg_error)
		return;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = nfs_pgio_get_mirror(desc, midx);
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list,
						       desc->pg_error);
	}
}

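/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * When mirroring is in use, a duplicate of @req is queued on every mirror
 * before @req itself is added to mirror 0. Returns 1 on success, 0 on
 * failure (with desc->pg_error set and queued requests cleaned up).
 */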
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	/* Create the mirror instances first, and fire them off */
	for (midx = 1; midx < desc->pg_mirror_count; midx++) {
		nfs_page_group_lock(req);

		dupreq = nfs_create_subreq(req,
					   pgbase, offset, bytes);

		nfs_page_group_unlock(req);
		if (IS_ERR(dupreq)) {
			desc->pg_error = PTR_ERR(dupreq);
			goto out_failed;
		}

		nfs_pgio_set_current_mirror(desc, midx);
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_cleanup_subreq;
	}

	nfs_pgio_set_current_mirror(desc, 0);
	if (!nfs_pageio_add_request_mirror(desc, req))
		goto out_failed;

	return 1;

out_cleanup_subreq:
	nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
	nfs_pageio_error_cleanup(desc);
	return 0;
}

/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: index of the mirror to complete
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
				       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror;
	u32 restore_idx;

	restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx);
	mirror = nfs_pgio_current_mirror(desc);

	for (;;) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	nfs_pgio_set_current_mirror(desc, restore_idx);
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move requests from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(pages);

	desc->pg_io_completion = hdr->io_completion;
	desc->pg_dreq = hdr->dreq;
	nfs_netfs_set_pageio_descriptor(desc, hdr);
	list_splice_init(&hdr->pages, &pages);
	while (!list_empty(&pages)) {
		struct nfs_page *req = nfs_list_entry(pages.next);

		if (!nfs_pageio_add_request(desc, req))
			break;
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&pages)) {
		int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
		hdr->completion_ops->error_cleanup(&pages, err);
		nfs_set_pgio_error(hdr, err, hdr->io_start);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_error < 0)
		nfs_pageio_error_cleanup(desc);
	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	struct folio *folio;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = nfs_pgio_get_mirror(desc, midx);
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			folio = nfs_page_to_folio(prev);
			if (folio) {
				if (index == folio_next_index(folio))
					continue;
			} else if (index == prev->wb_index + 1)
				continue;
			/*
			 * We will submit more requests after these. Indicate
			 * this to the underlying layers.
			 */
			desc->pg_moreio = 1;
			nfs_pageio_complete(desc);
			break;
		}
	}
}

/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	nfs_pageio_complete(pgio);
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};