/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "elevator.h"
#include "blk-mq.h"

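/* Cap on the scheduler tag depth (nr_requests) when an elevator is attached. */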
#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request);
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
		struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
		struct elevator_tags *et);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);

struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
		unsigned int nr_hw_queues, unsigned int nr_requests);
int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
		struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
void blk_mq_free_sched_tags(struct elevator_tags *et,
		struct blk_mq_tag_set *set);
void blk_mq_free_sched_tags_batch(struct xarray *et_table,
		struct blk_mq_tag_set *set);

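/*
 * Restart dispatching on @hctx, but only if a prior dispatch marked it
 * with BLK_MQ_S_SCHED_RESTART; testing the bit first keeps the common
 * no-restart path to a single bit check.
 */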
static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		__blk_mq_sched_restart(hctx);
}

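/* A bio is a merge candidate unless it carries any of REQ_NOMERGE_FLAGS. */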
static inline bool bio_mergeable(struct bio *bio)
{
	return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
}

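/*
 * Ask the elevator whether @bio may be merged into @rq. Only requests
 * owned by a scheduler (RQF_USE_SCHED) are subject to the ->allow_merge()
 * hook; without a hook, or for non-scheduler requests, merging is allowed.
 */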
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
		struct bio *bio)
{
	if (rq->rq_flags & RQF_USE_SCHED) {
		struct elevator_queue *e = q->elevator;

		if (e->type->ops.allow_merge)
			return e->type->ops.allow_merge(q, rq, bio);
	}
	return true;
}

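/*
 * Pass a completion notification for @rq, stamped with completion time
 * @now, to the owning scheduler's ->completed_request() hook, if any.
 */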
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_USE_SCHED) {
		struct elevator_queue *e = rq->q->elevator;

		if (e->type->ops.completed_request)
			e->type->ops.completed_request(rq, now);
	}
}

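/*
 * Tell the owning scheduler that @rq is being requeued, via its
 * ->requeue_request() hook, if one is implemented.
 */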
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	if (rq->rq_flags & RQF_USE_SCHED) {
		struct request_queue *q = rq->q;
		struct elevator_queue *e = q->elevator;

		if (e->type->ops.requeue_request)
			e->type->ops.requeue_request(rq);
	}
}

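/*
 * Returns true if the scheduler attached to @hctx reports pending work
 * through its ->has_work() hook; false if there is no elevator or no hook.
 */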
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.has_work)
		return e->type->ops.has_work(hctx);

	return false;
}

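/* Check whether a dispatch has marked @hctx for a deferred restart. */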
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

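/*
 * Propagate @depth as the minimum shallow depth to every hardware
 * queue's scheduler tag bitmap, so sbitmap can size its wake batches
 * against the smallest shallow allocation that will be used.
 */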
static inline void blk_mq_set_min_shallow_depth(struct request_queue *q,
		unsigned int depth)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
				depth);
}


#endif