// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include "blk.h"
#include "blk-mq.h"

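/*
 * Sysfs support for blk-mq: exposes an "mq" directory under the disk's
 * device kobject, with one subdirectory per hardware queue and one
 * "cpu<n>" subdirectory per software (per-CPU) context mapped to it.
 * (Summary comment added by the editor, derived from the kobject_add()
 * calls below.)
 */
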
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
};

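/*
 * show() dispatcher for the per-hctx attributes defined below. The read
 * runs under q->elevator_lock, which presumably keeps the queue's
 * elevator/tag state stable against concurrent scheduler switches while
 * the attribute value is formatted. (Editor's note; the locking rationale
 * is inferred from the lock's name, not stated in this file.)
 */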
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->elevator_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->elevator_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

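/*
 * Format the hctx's CPU mask as a comma-separated list, e.g. "0, 1, 2, 3"
 * (example output only), truncating quietly if the list would overflow one
 * page. The final snprintf() always has room to append the newline because
 * "size" was defined as PAGE_SIZE - 1, hence the "size + 1 - pos" bound.
 */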
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
};

static const struct kobj_type blk_mq_ktype = {
	.release = blk_mq_sysfs_release,
};

static const struct kobj_type blk_mq_ctx_ktype = {
	.release = blk_mq_ctx_sysfs_release,
};

static const struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release = blk_mq_hw_sysfs_release,
};
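
/*
 * The resulting hierarchy looks roughly like this (the disk name is only
 * an illustration; the directory names come from the kobject_add() format
 * strings used in blk_mq_register_hctx() and blk_mq_sysfs_register()):
 *
 *   /sys/block/nvme0n1/mq/                 q->mq_kobj   (blk_mq_ktype)
 *   /sys/block/nvme0n1/mq/0/               hctx kobject (blk_mq_hw_ktype)
 *   /sys/block/nvme0n1/mq/0/nr_tags
 *   /sys/block/nvme0n1/mq/0/nr_reserved_tags
 *   /sys/block/nvme0n1/mq/0/cpu_list
 *   /sys/block/nvme0n1/mq/0/cpu0/          ctx kobject  (blk_mq_ctx_ktype)
 */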

static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		if (ctx->kobj.state_in_sysfs)
			kobject_del(&ctx->kobj);

	if (hctx->kobj.state_in_sysfs)
		kobject_del(&hctx->kobj);
}

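/*
 * Register a hardware queue and all of its mapped software contexts in
 * sysfs. On partial failure, every ctx kobject added so far (index < i)
 * is deleted again before the hctx kobject itself is removed, so the
 * caller only ever sees an all-or-nothing result.
 */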
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

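/*
 * Pairs with blk_mq_sysfs_deinit(): every per-CPU ctx kobject takes a
 * reference on q->mq_kobj here, and blk_mq_ctx_sysfs_release() drops the
 * matching reference when a ctx kobject dies. Since q->mq_kobj is embedded
 * in the blk_mq_ctxs structure (see blk_mq_sysfs_release() above), that
 * structure outlives all of its contexts.
 */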
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

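/*
 * Once registration succeeds, the attributes behave like any other sysfs
 * file from userspace, e.g. (paths and output are illustrative only):
 *
 *   $ cat /sys/block/nvme0n1/mq/0/cpu_list
 *   0, 1, 2, 3
 */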
int blk_mq_sysfs_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;
	int ret;

	ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
	if (ret < 0)
		return ret;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	mutex_lock(&q->tag_set->tag_list_lock);
	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto out_unreg;
	}
	mutex_unlock(&q->tag_set->tag_list_lock);
	return 0;

out_unreg:
	queue_for_each_hw_ctx(q, hctx, j) {
		if (j < i)
			blk_mq_unregister_hctx(hctx);
	}
	mutex_unlock(&q->tag_set->tag_list_lock);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	return ret;
}

void blk_mq_sysfs_unregister(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->tag_set->tag_list_lock);
	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
	mutex_unlock(&q->tag_set->tag_list_lock);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
}

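/*
 * The two helpers below re-/unregister only the per-hctx directories,
 * leaving the parent "mq" kobject in place. They are no-ops on queues that
 * were never registered in sysfs, which presumably makes them safe to call
 * while the set of hardware queues is being updated (editor's note; the
 * call sites live outside this file).
 */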
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (!blk_queue_registered(q))
		return;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	int ret = 0;

	if (!blk_queue_registered(q))
		goto out;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

out:
	return ret;
}