/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/blk-mq.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)
/* io_uring_cmd is being issued again */
#define IORING_URING_CMD_REISSUE	(1U << 31)

typedef void (*io_uring_cmd_tw_t)(struct io_uring_cmd *cmd,
				  unsigned issue_flags);

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	io_uring_cmd_tw_t task_work_cb;
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)
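/*
 * Example (illustrative sketch, not part of this header; struct my_cmd_pdu
 * and my_prep() are hypothetical): a driver stashes per-command state in
 * the 32-byte pdu area and fetches it back with a compile-time size check:
 *
 *	struct my_cmd_pdu {
 *		int	status;
 *		void	*cookie;
 *	};
 *
 *	static void my_prep(struct io_uring_cmd *cmd)
 *	{
 *		struct my_cmd_pdu *pdu = io_uring_cmd_to_pdu(cmd, struct my_cmd_pdu);
 *
 *		pdu->status = 0;
 *		pdu->cookie = NULL;
 *	}
 *
 * A pdu_type bigger than sizeof_field(struct io_uring_cmd, pdu) fails the
 * BUILD_BUG_ON() above.
 */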

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags);
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags);
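/*
 * Example (illustrative sketch; which sqe field carries the user address,
 * the length and the data direction are driver specific): a ->uring_cmd()
 * handler maps a registered (fixed) user buffer into an iov_iter:
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(READ_ONCE(sqe->addr), len, WRITE,
 *					&iter, ioucmd, issue_flags);
 *	if (ret)
 *		return ret;
 *
 * On success @iter describes the registered buffer and can be handed to
 * the actual transfer.
 */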

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
			 unsigned issue_flags, bool is_cqe32);

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			       io_uring_cmd_tw_t task_work_cb,
			       unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags);
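/*
 * Example (illustrative sketch): a driver that keeps the command inflight
 * typically marks it cancelable before returning -EIOCBQUEUED from its
 * ->uring_cmd() handler, so that ring teardown can cancel it:
 *
 *	io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *	return -EIOCBQUEUED;
 */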

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

/*
 * Select a buffer from the provided buffer group for multishot uring_cmd.
 * Returns the selected buffer address and size.
 */
struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd,
					    unsigned buf_group, size_t *len,
					    unsigned int issue_flags);

/*
 * Complete a multishot uring_cmd event. This will post a CQE to the completion
 * queue and update the provided buffer.
 */
bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
				 struct io_br_sel *sel, unsigned int issue_flags);
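/*
 * Example (illustrative sketch; the exact meaning of the boolean result is
 * defined by the core io_uring code): a multishot handler selects a buffer
 * from @buf_group, fills it, and posts a CQE while the command itself stays
 * inflight:
 *
 *	size_t len = 0;
 *	struct io_br_sel sel;
 *
 *	sel = io_uring_cmd_buffer_select(ioucmd, buf_group, &len, issue_flags);
 *	if (!sel.addr)
 *		return -ENOBUFS;
 *	(fill up to @len bytes at sel.addr, then post the completion)
 *	io_uring_mshot_cmd_post_cqe(ioucmd, &sel, issue_flags);
 */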

#else
static inline int
io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			  struct iov_iter *iter, struct io_uring_cmd *ioucmd,
			  unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}
static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
						const struct iovec __user *uvec,
						size_t uvec_segs,
						int ddir, struct iov_iter *iter,
						unsigned issue_flags)
{
	return -EOPNOTSUPP;
}
static inline void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
				       u64 ret2, unsigned issue_flags, bool is_cqe32)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
					     io_uring_cmd_tw_t task_work_cb, unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
						unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
static inline struct io_br_sel
io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd, unsigned buf_group,
			   size_t *len, unsigned int issue_flags)
{
	return (struct io_br_sel) { .val = -EOPNOTSUPP };
}
static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
					       struct io_br_sel *sel, unsigned int issue_flags)
{
	return true;
}
#endif

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
						io_uring_cmd_tw_t task_work_cb)
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
						 io_uring_cmd_tw_t task_work_cb)
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}
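/*
 * Example (illustrative sketch; my_cmd_tw() is hypothetical): a driver
 * completing from hard-IRQ context defers posting the CQE to task context:
 *
 *	static void my_cmd_tw(struct io_uring_cmd *cmd, unsigned issue_flags)
 *	{
 *		io_uring_cmd_done(cmd, 0, issue_flags);
 *	}
 *
 *	(from the completion interrupt)
 *	io_uring_cmd_complete_in_task(ioucmd, my_cmd_tw);
 */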

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

/*
 * Return the uring_cmd's context reference as its context handle, for
 * drivers to track per-context resources such as registered kernel I/O
 * buffers.
 */
static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->ctx;
}

static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret,
				     unsigned issue_flags)
{
	return __io_uring_cmd_done(ioucmd, ret, 0, issue_flags, false);
}

static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret,
				       u64 res2, unsigned issue_flags)
{
	return __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, true);
}
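/*
 * Example (illustrative sketch): the common case posts a single result via
 * io_uring_cmd_done(); rings set up with IORING_SETUP_CQE32 can carry a
 * second 64-bit result in the big CQE via io_uring_cmd_done32():
 *
 *	io_uring_cmd_done(ioucmd, err, issue_flags);
 *	io_uring_cmd_done32(ioucmd, err, res2, issue_flags);
 */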

int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
			    void (*release)(void *), unsigned int index,
			    unsigned int issue_flags);
int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
			      unsigned int issue_flags);
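/*
 * Example (illustrative sketch; my_release() and @index are driver policy):
 * publish a block request's bio_vecs as a fixed kernel buffer at slot
 * @index of the ring's registered buffer table, and drop it again once the
 * zero-copy I/O against that slot has finished:
 *
 *	ret = io_buffer_register_bvec(cmd, rq, my_release, index, issue_flags);
 *	if (ret)
 *		return ret;
 *	(issue fixed-buffer I/O against @index)
 *	io_buffer_unregister_bvec(cmd, index, issue_flags);
 */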

#endif /* _LINUX_IO_URING_CMD_H */