/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for network filesystem support
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
7
8#include <linux/slab.h>
9#include <linux/seq_file.h>
10#include <linux/folio_queue.h>
11#include <linux/netfs.h>
12#include <linux/fscache.h>
13#include <linux/fscache-cache.h>
14#include <trace/events/netfs.h>
15#include <trace/events/fscache.h>
16
/* Prefix all pr_*() output from this compilation unit with "netfs: ". */
#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt
22
23/*
24 * buffered_read.c
25 */
26void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
27int netfs_prefetch_for_write(struct file *file, struct folio *folio,
28 size_t offset, size_t len);
29
30/*
31 * buffered_write.c
32 */
33void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
34 loff_t pos, size_t copied);
35
36/*
37 * main.c
38 */
39extern unsigned int netfs_debug;
40extern struct list_head netfs_io_requests;
41extern spinlock_t netfs_proc_lock;
42extern mempool_t netfs_request_pool;
43extern mempool_t netfs_subrequest_pool;
44
#ifdef CONFIG_PROC_FS
/*
 * Add a request to the /proc/fs/netfs/requests list for debugging.  The list
 * is RCU-protected for lockless readers; writers serialise on
 * netfs_proc_lock.
 */
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}

/*
 * Remove a request from the /proc list if it was ever added (proc_link is
 * left empty for requests that were never listed).
 */
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (!list_empty(&rreq->proc_link)) {
		spin_lock(&netfs_proc_lock);
		list_del_rcu(&rreq->proc_link);
		spin_unlock(&netfs_proc_lock);
	}
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif
64
65/*
66 * misc.c
67 */
68struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
69 enum netfs_folioq_trace trace);
70void netfs_reset_iter(struct netfs_io_subrequest *subreq);
71void netfs_wake_collector(struct netfs_io_request *rreq);
72void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq);
73void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq,
74 struct netfs_io_stream *stream);
75ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
76ssize_t netfs_wait_for_write(struct netfs_io_request *rreq);
77void netfs_wait_for_paused_read(struct netfs_io_request *rreq);
78void netfs_wait_for_paused_write(struct netfs_io_request *rreq);
79
80/*
81 * objects.c
82 */
83struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
84 struct file *file,
85 loff_t start, size_t len,
86 enum netfs_io_origin origin);
87void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
88void netfs_clear_subrequests(struct netfs_io_request *rreq);
89void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
90void netfs_put_failed_request(struct netfs_io_request *rreq);
91struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
92
93static inline void netfs_see_request(struct netfs_io_request *rreq,
94 enum netfs_rreq_ref_trace what)
95{
96 trace_netfs_rreq_ref(rreq_debug_id: rreq->debug_id, ref: refcount_read(r: &rreq->ref), what);
97}
98
99static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
100 enum netfs_sreq_ref_trace what)
101{
102 trace_netfs_sreq_ref(rreq_debug_id: subreq->rreq->debug_id, subreq_debug_index: subreq->debug_index,
103 ref: refcount_read(r: &subreq->ref), what);
104}
105
106/*
107 * read_collect.c
108 */
109bool netfs_read_collection(struct netfs_io_request *rreq);
110void netfs_read_collection_worker(struct work_struct *work);
111void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error);
112
113/*
114 * read_pgpriv2.c
115 */
116void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
117void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
118bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);
119
120/*
121 * read_retry.c
122 */
123void netfs_retry_reads(struct netfs_io_request *rreq);
124void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);
125
126/*
127 * stats.c
128 */
129#ifdef CONFIG_NETFS_STATS
130extern atomic_t netfs_n_rh_dio_read;
131extern atomic_t netfs_n_rh_readahead;
132extern atomic_t netfs_n_rh_read_folio;
133extern atomic_t netfs_n_rh_read_single;
134extern atomic_t netfs_n_rh_rreq;
135extern atomic_t netfs_n_rh_sreq;
136extern atomic_t netfs_n_rh_download;
137extern atomic_t netfs_n_rh_download_done;
138extern atomic_t netfs_n_rh_download_failed;
139extern atomic_t netfs_n_rh_download_instead;
140extern atomic_t netfs_n_rh_read;
141extern atomic_t netfs_n_rh_read_done;
142extern atomic_t netfs_n_rh_read_failed;
143extern atomic_t netfs_n_rh_zero;
144extern atomic_t netfs_n_rh_short_read;
145extern atomic_t netfs_n_rh_write;
146extern atomic_t netfs_n_rh_write_begin;
147extern atomic_t netfs_n_rh_write_done;
148extern atomic_t netfs_n_rh_write_failed;
149extern atomic_t netfs_n_rh_write_zskip;
150extern atomic_t netfs_n_rh_retry_read_req;
151extern atomic_t netfs_n_rh_retry_read_subreq;
152extern atomic_t netfs_n_wh_buffered_write;
153extern atomic_t netfs_n_wh_writethrough;
154extern atomic_t netfs_n_wh_dio_write;
155extern atomic_t netfs_n_wh_writepages;
156extern atomic_t netfs_n_wh_copy_to_cache;
157extern atomic_t netfs_n_wh_wstream_conflict;
158extern atomic_t netfs_n_wh_upload;
159extern atomic_t netfs_n_wh_upload_done;
160extern atomic_t netfs_n_wh_upload_failed;
161extern atomic_t netfs_n_wh_write;
162extern atomic_t netfs_n_wh_write_done;
163extern atomic_t netfs_n_wh_write_failed;
164extern atomic_t netfs_n_wh_retry_write_req;
165extern atomic_t netfs_n_wh_retry_write_subreq;
166extern atomic_t netfs_n_wb_lock_skip;
167extern atomic_t netfs_n_wb_lock_wait;
168extern atomic_t netfs_n_folioq;
169
170int netfs_stats_show(struct seq_file *m, void *v);
171
/* Bump a netfs statistics counter (only counted with CONFIG_NETFS_STATS). */
static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

/* Decrement a netfs statistics counter. */
static inline void netfs_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}

#else
/* Stats compiled out: the counter operations disappear entirely. */
#define netfs_stat(x) do {} while(0)
#define netfs_stat_d(x) do {} while(0)
#endif
186
187/*
188 * write_collect.c
189 */
190int netfs_folio_written_back(struct folio *folio);
191bool netfs_write_collection(struct netfs_io_request *wreq);
192void netfs_write_collection_worker(struct work_struct *work);
193
194/*
195 * write_issue.c
196 */
197struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
198 struct file *file,
199 loff_t start,
200 enum netfs_io_origin origin);
201void netfs_reissue_write(struct netfs_io_stream *stream,
202 struct netfs_io_subrequest *subreq,
203 struct iov_iter *source);
204void netfs_issue_write(struct netfs_io_request *wreq,
205 struct netfs_io_stream *stream);
206size_t netfs_advance_write(struct netfs_io_request *wreq,
207 struct netfs_io_stream *stream,
208 loff_t start, size_t len, bool to_eof);
209struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
210int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
211 struct folio *folio, size_t copied, bool to_page_end,
212 struct folio **writethrough_cache);
213ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
214 struct folio *writethrough_cache);
215int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
216
217/*
218 * write_retry.c
219 */
220void netfs_retry_writes(struct netfs_io_request *wreq);
221
222/*
223 * Miscellaneous functions.
224 */
225static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
226{
227#if IS_ENABLED(CONFIG_FSCACHE)
228 struct fscache_cookie *cookie = ctx->cache;
229
230 return fscache_cookie_valid(cookie) && cookie->cache_priv &&
231 fscache_cookie_enabled(cookie);
232#else
233 return false;
234#endif
235}
236
237/*
238 * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
239 */
240static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
241{
242 if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
243 refcount_inc(r: &netfs_group->ref);
244 return netfs_group;
245}
246
247/*
248 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
249 */
250static inline void netfs_put_group(struct netfs_group *netfs_group)
251{
252 if (netfs_group &&
253 netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
254 refcount_dec_and_test(r: &netfs_group->ref))
255 netfs_group->free(netfs_group);
256}
257
258/*
259 * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
260 */
261static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
262{
263 if (netfs_group &&
264 netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
265 refcount_sub_and_test(i: nr, r: &netfs_group->ref))
266 netfs_group->free(netfs_group);
267}
268
269/*
270 * Clear and wake up a NETFS_RREQ_* flag bit on a request.
271 */
272static inline void netfs_wake_rreq_flag(struct netfs_io_request *rreq,
273 unsigned int rreq_flag,
274 enum netfs_rreq_trace trace)
275{
276 if (test_bit(rreq_flag, &rreq->flags)) {
277 clear_bit_unlock(nr: rreq_flag, addr: &rreq->flags);
278 smp_mb__after_atomic(); /* Set flag before task state */
279 trace_netfs_rreq(rreq, what: trace);
280 wake_up(&rreq->waitq);
281 }
282}
283
/*
 * Test the NETFS_RREQ_IN_PROGRESS flag, inserting an appropriate barrier.
 */
static inline bool netfs_check_rreq_in_progress(const struct netfs_io_request *rreq)
{
	/* Order read of flags before read of anything else, such as error. */
	return test_bit_acquire(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
}
292
/*
 * Test the NETFS_SREQ_IN_PROGRESS flag, inserting an appropriate barrier.
 */
static inline bool netfs_check_subreq_in_progress(const struct netfs_io_subrequest *subreq)
{
	/* Order read of flags before read of anything else, such as error. */
	return test_bit_acquire(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
}
301
302/*
303 * fscache-cache.c
304 */
305#ifdef CONFIG_PROC_FS
306extern const struct seq_operations fscache_caches_seq_ops;
307#endif
308bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
309void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
310struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
311void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
312
/* Read the cache's lifecycle state; acquire ordering pairs with the release
 * in fscache_set_cache_state().
 */
static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
{
	return smp_load_acquire(&cache->state);
}
317
/* Return true if the cache is in the active state. */
static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
{
	return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
}
322
323static inline void fscache_set_cache_state(struct fscache_cache *cache,
324 enum fscache_cache_state new_state)
325{
326 smp_store_release(&cache->state, new_state);
327
328}
329
/* Atomically switch the cache state from old_state to new_state with release
 * ordering; returns false if the state was no longer old_state.
 */
static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
						 enum fscache_cache_state old_state,
						 enum fscache_cache_state new_state)
{
	return try_cmpxchg_release(&cache->state, &old_state, new_state);
}
336
337/*
338 * fscache-cookie.c
339 */
340extern struct kmem_cache *fscache_cookie_jar;
341#ifdef CONFIG_PROC_FS
342extern const struct seq_operations fscache_cookies_seq_ops;
343#endif
344extern struct timer_list fscache_cookie_lru_timer;
345
346extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
347extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
348 enum fscache_access_trace why);
349
350static inline void fscache_see_cookie(struct fscache_cookie *cookie,
351 enum fscache_cookie_trace where)
352{
353 trace_fscache_cookie(cookie_debug_id: cookie->debug_id, ref: refcount_read(r: &cookie->ref),
354 where);
355}
356
357/*
358 * fscache-main.c
359 */
360extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
#ifdef CONFIG_FSCACHE
int __init fscache_init(void);
void __exit fscache_exit(void);
#else
/* With fscache compiled out, init/exit are no-ops. */
static inline int fscache_init(void) { return 0; }
static inline void fscache_exit(void) {}
#endif
368
369/*
370 * fscache-proc.c
371 */
#ifdef CONFIG_PROC_FS
extern int __init fscache_proc_init(void);
extern void fscache_proc_cleanup(void);
#else
/* No procfs: proc setup/teardown compile away. */
#define fscache_proc_init() (0)
#define fscache_proc_cleanup() do {} while (0)
#endif
379
380/*
381 * fscache-stats.c
382 */
383#ifdef CONFIG_FSCACHE_STATS
384extern atomic_t fscache_n_volumes;
385extern atomic_t fscache_n_volumes_collision;
386extern atomic_t fscache_n_volumes_nomem;
387extern atomic_t fscache_n_cookies;
388extern atomic_t fscache_n_cookies_lru;
389extern atomic_t fscache_n_cookies_lru_expired;
390extern atomic_t fscache_n_cookies_lru_removed;
391extern atomic_t fscache_n_cookies_lru_dropped;
392
393extern atomic_t fscache_n_acquires;
394extern atomic_t fscache_n_acquires_ok;
395extern atomic_t fscache_n_acquires_oom;
396
397extern atomic_t fscache_n_invalidates;
398
399extern atomic_t fscache_n_relinquishes;
400extern atomic_t fscache_n_relinquishes_retire;
401extern atomic_t fscache_n_relinquishes_dropped;
402
403extern atomic_t fscache_n_resizes;
404extern atomic_t fscache_n_resizes_null;
405
/* Bump an fscache statistics counter (only counted with CONFIG_FSCACHE_STATS). */
static inline void fscache_stat(atomic_t *stat)
{
	atomic_inc(stat);
}

/* Decrement an fscache statistics counter. */
static inline void fscache_stat_d(atomic_t *stat)
{
	atomic_dec(stat);
}
415
416#define __fscache_stat(stat) (stat)
417
418int fscache_stats_show(struct seq_file *m);
#else

/* Stats compiled out: counter ops vanish and the show function is empty. */
#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
#define fscache_stat_d(stat) do {} while (0)

static inline int fscache_stats_show(struct seq_file *m) { return 0; }
#endif
427
428/*
429 * fscache-volume.c
430 */
431#ifdef CONFIG_PROC_FS
432extern const struct seq_operations fscache_volumes_seq_ops;
433#endif
434
435struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
436 enum fscache_volume_trace where);
437bool fscache_begin_volume_access(struct fscache_volume *volume,
438 struct fscache_cookie *cookie,
439 enum fscache_access_trace why);
440void fscache_create_volume(struct fscache_volume *volume, bool wait);
441
442/*****************************************************************************/
443/*
444 * debug tracing
445 */
/* Emit a debug line tagged with the current task's comm name. */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

/* Function entry/exit/general debug wrappers around dbgprintk(). */
#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
452
#ifdef __KDEBUG
/* __KDEBUG builds: debugging statements are always emitted. */
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
/* CONFIG_NETFS_DEBUG builds: debugging gated at runtime on netfs_debug. */
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
/* Debugging compiled out; no_printk() still type-checks the arguments. */
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
482
/*
 * assertions
 */
#if 1 /* defined(__KDEBUGALL) */

/* BUG() if the expression X is false. */
#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

/* BUG() if the comparison (X OP Y) is false, logging both values. */
#define ASSERTCMP(X, OP, Y)					\
do {								\
	if (unlikely(!((X) OP (Y)))) {				\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		pr_err("%lx " #OP " %lx is false\n",		\
		       (unsigned long)(X), (unsigned long)(Y));	\
		BUG();						\
	}							\
} while (0)

/* BUG() if condition C holds but X is false. */
#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

/* BUG() if condition C holds but (X OP Y) is false, logging both values. */
#define ASSERTIFCMP(C, X, OP, Y)				\
do {								\
	if (unlikely((C) && !((X) OP (Y)))) {			\
		pr_err("\n");					\
		pr_err("Assertion failed\n");			\
		pr_err("%lx " #OP " %lx is false\n",		\
		       (unsigned long)(X), (unsigned long)(Y));	\
		BUG();						\
	}							\
} while (0)

#else

/* Assertions compiled out. */
#define ASSERT(X) do {} while (0)
#define ASSERTCMP(X, OP, Y) do {} while (0)
#define ASSERTIF(C, X) do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y) do {} while (0)

#endif /* assert or not */
536