// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include "internal.h"

static void netfs_free_request(struct work_struct *work);

/*
 * Allocate an I/O request and initialise it.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin)
{
	static atomic_t debug_ids;
	struct inode *inode = file ? file_inode(file) : mapping->host;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct netfs_io_request *rreq;
	mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
	struct kmem_cache *cache = mempool->pool_data;
	int ret;

	for (;;) {
		rreq = mempool_alloc(mempool, GFP_KERNEL);
		if (rreq)
			break;
		msleep(10);
	}

	memset(rreq, 0, kmem_cache_size(cache));
	INIT_WORK(&rreq->cleanup_work, netfs_free_request);
	rreq->start	= start;
	rreq->len	= len;
	rreq->origin	= origin;
	rreq->netfs_ops	= ctx->ops;
	rreq->mapping	= mapping;
	rreq->inode	= inode;
	rreq->i_size	= i_size_read(inode);
	rreq->debug_id	= atomic_inc_return(&debug_ids);
	rreq->wsize	= INT_MAX;
	rreq->io_streams[0].sreq_max_len = ULONG_MAX;
	rreq->io_streams[0].sreq_max_segs = 0;
	spin_lock_init(&rreq->lock);
	INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
	INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
	init_waitqueue_head(&rreq->waitq);
	refcount_set(&rreq->ref, 2);

	if (origin == NETFS_READAHEAD ||
	    origin == NETFS_READPAGE ||
	    origin == NETFS_READ_GAPS ||
	    origin == NETFS_READ_SINGLE ||
	    origin == NETFS_READ_FOR_WRITE ||
	    origin == NETFS_UNBUFFERED_READ ||
	    origin == NETFS_DIO_READ) {
		INIT_WORK(&rreq->work, netfs_read_collection_worker);
		rreq->io_streams[0].avail = true;
	} else {
		INIT_WORK(&rreq->work, netfs_write_collection_worker);
	}

	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	if (rreq->netfs_ops->init_request) {
		ret = rreq->netfs_ops->init_request(rreq, file);
		if (ret < 0) {
			mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
			return ERR_PTR(ret);
		}
	}

	atomic_inc(&ctx->io_count);
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), netfs_rreq_trace_new);
	netfs_proc_add_rreq(rreq);
	netfs_stat(&netfs_n_rh_rreq);
	return rreq;
}
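/*
 * Illustrative sketch, not part of this file: a network filesystem would
 * typically obtain a request as in the snippet below and drop its reference
 * with netfs_put_request() once the I/O has been set going.  The helper
 * name my_fs_begin_read() is hypothetical and the trace tag on the final
 * put is only an example; the IS_ERR()/PTR_ERR() handling follows from
 * init_request failures being returned as ERR_PTR() above.
 *
 *	static int my_fs_begin_read(struct address_space *mapping,
 *				    struct file *file, loff_t pos, size_t len)
 *	{
 *		struct netfs_io_request *rreq;
 *
 *		rreq = netfs_alloc_request(mapping, file, pos, len,
 *					   NETFS_READPAGE);
 *		if (IS_ERR(rreq))
 *			return PTR_ERR(rreq);
 *
 *		// ... slice the request into subrequests and submit them ...
 *
 *		netfs_put_request(rreq, netfs_rreq_trace_put_return);
 *		return 0;
 *	}
 */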
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	int r;

	__refcount_inc(&rreq->ref, &r);
	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}

void netfs_clear_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
		stream = &rreq->io_streams[s];
		while (!list_empty(&stream->subrequests)) {
			subreq = list_first_entry(&stream->subrequests,
						  struct netfs_io_subrequest, rreq_link);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, netfs_sreq_trace_put_clear);
		}
	}
}

static void netfs_free_request_rcu(struct rcu_head *rcu)
{
	struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);

	mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
	netfs_stat_d(&netfs_n_rh_rreq);
}

static void netfs_deinit_request(struct netfs_io_request *rreq)
{
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	unsigned int i;

	trace_netfs_rreq(rreq, netfs_rreq_trace_free);

	/* Cancel/flush the result collection worker.  That does not carry a
	 * ref of its own, so we must wait for it somewhere.
	 */
	cancel_work_sync(&rreq->work);

	netfs_proc_del_rreq(rreq);
	netfs_clear_subrequests(rreq);
	if (rreq->netfs_ops->free_request)
		rreq->netfs_ops->free_request(rreq);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	if (rreq->direct_bv) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			if (rreq->direct_bv[i].bv_page) {
				if (rreq->direct_bv_unpin)
					unpin_user_page(rreq->direct_bv[i].bv_page);
			}
		}
		kvfree(rreq->direct_bv);
	}
	rolling_buffer_clear(&rreq->buffer);

	if (atomic_dec_and_test(&ictx->io_count))
		wake_up_var(&ictx->io_count);
}

static void netfs_free_request(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, cleanup_work);

	netfs_deinit_request(rreq);
	call_rcu(&rreq->rcu, netfs_free_request_rcu);
}

void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (rreq) {
		debug_id = rreq->debug_id;
		dead = __refcount_dec_and_test(&rreq->ref, &r);
		trace_netfs_rreq_ref(debug_id, r - 1, what);
		if (dead)
			WARN_ON(!queue_work(system_dfl_wq, &rreq->cleanup_work));
	}
}

/*
 * Free a request (synchronously) that was just allocated but has
 * failed before it could be submitted.
 */
void netfs_put_failed_request(struct netfs_io_request *rreq)
{
	int r = refcount_read(&rreq->ref);

	/* New requests have two references (see netfs_alloc_request()), and
	 * this function is only allowed on new request objects.
	 */
	WARN_ON_ONCE(r != 2);

	trace_netfs_rreq_ref(rreq->debug_id, r, netfs_rreq_trace_put_failed);
	netfs_free_request(&rreq->cleanup_work);
}
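/*
 * Illustrative sketch, not part of this file: a caller that allocated a
 * request but then hit an error before submitting any I/O would use
 * netfs_put_failed_request() rather than netfs_put_request(), since the
 * object still holds both of the references set up in netfs_alloc_request().
 * The setup helper named here is hypothetical:
 *
 *	rreq = netfs_alloc_request(mapping, file, pos, len, NETFS_READAHEAD);
 *	if (IS_ERR(rreq))
 *		return PTR_ERR(rreq);
 *
 *	ret = my_fs_prepare_request(rreq);	// hypothetical setup step
 *	if (ret < 0) {
 *		netfs_put_failed_request(rreq);
 *		return ret;
 *	}
 */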
/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
	struct kmem_cache *cache = mempool->pool_data;

	for (;;) {
		subreq = mempool_alloc(rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool,
				       GFP_KERNEL);
		if (subreq)
			break;
		msleep(10);
	}

	memset(subreq, 0, kmem_cache_size(cache));
	INIT_WORK(&subreq->work, NULL);
	INIT_LIST_HEAD(&subreq->rreq_link);
	refcount_set(&subreq->ref, 2);
	subreq->rreq = rreq;
	subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
	netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
	netfs_stat(&netfs_n_rh_sreq);
	return subreq;
}
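/*
 * Illustrative sketch, not part of this file: submission code would
 * typically allocate a subrequest, link it into one of the request's
 * streams and issue it, dropping its own reference once it no longer needs
 * the pointer.  Locking of the stream list is omitted and the trace tag is
 * only an example; the subrequest keeps the parent request alive via the
 * reference taken in netfs_alloc_subrequest() above.
 *
 *	subreq = netfs_alloc_subrequest(rreq);
 *	subreq->start	= start;
 *	subreq->len	= len;
 *	list_add_tail(&subreq->rreq_link, &rreq->io_streams[0].subrequests);
 *	// ... hand the subrequest to the transport ...
 *	netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
 */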
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	int r;

	__refcount_inc(&subreq->ref, &r);
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
			     what);
}

static void netfs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	if (rreq->netfs_ops->free_subrequest)
		rreq->netfs_ops->free_subrequest(subreq);
	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_request(rreq, netfs_rreq_trace_put_subreq);
}

void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	unsigned int debug_index = subreq->debug_index;
	unsigned int debug_id = subreq->rreq->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&subreq->ref, &r);
	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
	if (dead)
		netfs_free_subrequest(subreq);
}