/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>
#include <linux/llist.h>
#include "blk.h"

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
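
/*
 * Usage note (a sketch, not itself part of this header's API): the batch
 * is what the blk-cgroup rwstat helpers pass to the percpu_counter API, so
 * per-cpu deltas are folded into the global counter only once they exceed
 * the batch.  Exact global values are never needed here, hence the huge
 * batch:
 *
 *	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
 */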

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkcg_gq		       *blkg;
	struct llist_node		lnode;
	int				lqueued;	/* queued in llist */
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};
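
/*
 * Example (a sketch, assuming a local variable "bis" pointing at a
 * blkg_iostat_set): readers snapshot the counters under the u64_stats
 * seqcount so that the 64-bit values read consistently on 32-bit SMP:
 *
 *	unsigned int seq;
 *	u64 rbytes;
 *
 *	do {
 *		seq = u64_stats_fetch_begin(&bis->sync);
 *		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
 *	} while (u64_stats_fetch_retry(&bis->sync, seq));
 */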

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
#endif
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;
	/* If there is block congestion on this cgroup. */
	atomic_t			congestion_count;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;

	/*
	 * List of updated percpu blkg_iostat_set's since the last flush.
	 */
	struct llist_head __percpu	*lhead;

#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char				fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
	bool				online;
};
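
/*
 * Example (a sketch for a hypothetical policy "foo", not defined in this
 * header): the embedding pattern described above, with container_of()
 * recovering the policy structure from the generic one:
 *
 *	struct foo_pd {
 *		struct blkg_policy_data pd;	(must come first)
 *		u64 budget;
 *	};
 *
 *	static inline struct foo_pd *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return container_of(pd, struct foo_pd, pd);
 *	}
 */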

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
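
/*
 * Example (a sketch, continuing the hypothetical policy "foo"): per-blkcg
 * data follows the same embedding pattern as blkg_policy_data above:
 *
 *	struct foo_cpd {
 *		struct blkcg_policy_data cpd;	(must come first)
 *		unsigned int dfl_weight;
 *	};
 *
 *	static inline struct foo_cpd *cpd_to_foo(struct blkcg_policy_data *cpd)
 *	{
 *		return container_of(cpd, struct foo_cpd, cpd);
 *	}
 */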

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
		struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

void blkg_init_queue(struct request_queue *q);
int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol);
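
/*
 * Example (a sketch; "foo" and its pd_* callbacks are hypothetical): a
 * minimal policy fills in only the ops it needs and registers at init
 * time.  plid is assigned by blkcg_policy_register(); each disk then opts
 * in separately via blkcg_activate_policy():
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 */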

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	char				*input;
	char				*body;
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
};

void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input);
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx);
unsigned long blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_exit(struct blkg_conf_ctx *ctx);
void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags);
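
/*
 * Example (a sketch; blkcg_policy_foo and apply_foo_config() are
 * hypothetical): a policy's cgroup file write handler parses "MAJ:MIN ..."
 * input with the init/prep/exit trio.  On success, ctx.blkg points at the
 * target blkg and ctx.body at the remainder of the input; blkg_conf_exit()
 * releases whatever blkg_conf_prep() acquired and is safe on the failure
 * path as well:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, buf);
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, &ctx);
 *	if (!ret)
 *		ret = apply_foo_config(ctx.blkg, ctx.body);
 *	blkg_conf_exit(&ctx);
 *	return ret;
 */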

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: the target &bio
 *
 * Return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as
 * if it were attached to the root blkg, and then backcharge to the actual
 * owning blkg.  The idea is we do bio_blkcg_css() to look up the actual
 * context for the bio and attach the appropriate blkg to the bio.  Then we
 * call this helper and, if it returns true, run with the root blkg for that
 * queue and do any backcharging to the originating cgroup once the io is
 * complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}
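
/*
 * Example (a sketch of how a throttling policy might use this): metadata
 * and swap bios escape throttling at issue time and are accounted back to
 * the owning cgroup on completion instead:
 *
 *	if (bio_issue_as_root_blkg(bio))
 *		return;		(don't throttle; backcharge on completion)
 */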

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.
 *
 * Must be called in an RCU critical section.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference_check(blkcg->blkg_hint,
			lockdep_is_held(&q->queue_lock));
	if (blkg && blkg->q == q)
		return blkg;

	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q != q)
		blkg = NULL;
	return blkg;
}
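
/*
 * Example (a sketch): a caller that wants to use the looked-up blkg after
 * leaving the RCU section pins it with blkg_tryget() (defined below)
 * before dropping the read lock:
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;
 *	rcu_read_unlock();
 *
 *	if (blkg) {
 *		use_blkg(blkg);		(hypothetical)
 *		blkg_put(blkg);
 *	}
 */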

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))
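
/*
 * Example (a sketch; foo_update_limits() is hypothetical): propagating a
 * change down a subtree, with @p_blkg itself visited first:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg)
 *		foo_update_limits(blkg);
 *	rcu_read_unlock();
 */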

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
		;

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
		atomic_inc(&blkg->blkcg->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
		atomic_dec(&blkg->blkcg->congestion_count);
}
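
/*
 * Example (a sketch; "over_limit" is a hypothetical policy condition):
 * since the delay is not decayed, a policy pairs the two calls around the
 * condition it is tracking:
 *
 *	if (over_limit)
 *		blkcg_set_delay(blkg, NSEC_PER_MSEC);
 *	else
 *		blkcg_clear_delay(blkg);
 */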

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root
 * should match.  The latter is necessary as we don't want to throttle e.g.
 * a metadata update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

static inline bool blkcg_policy_enabled(struct request_queue *q,
					const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline void blkg_init_queue(struct request_queue *q) { }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct gendisk *disk,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct gendisk *disk,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif	/* _BLK_CGROUP_PRIVATE_H */