// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING	= 1,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC	= 2,
};

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then ->buf_ring is used. If not, then
	 * these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct io_uring_buf_ring *buf_ring;
	};
	/* count of classic/legacy buffers in buffer list */
	int nbufs;

	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	/* IOBL_* flags */
	__u16 flags;

	struct io_mapped_region region;
};
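
/*
 * Illustrative sketch, not verbatim kernel code: the union above is
 * discriminated by IOBL_BUF_RING in ->flags, so a consumer would branch
 * roughly as follows:
 *
 *	if (bl->flags & IOBL_BUF_RING)
 *		ring = bl->buf_ring;
 *	else
 *		buf = list_first_entry(&bl->buf_list, struct io_buffer, list);
 */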

struct io_buffer {
	struct list_head list;
	__u64 addr;		/* userspace address of the buffer */
	__u32 len;		/* length in bytes */
	__u16 bid;		/* buffer ID, reported back in the CQE */
	__u16 bgid;		/* buffer group ID */
};

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND	= 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE		= 2,
};

struct buf_sel_arg {
	struct iovec *iovs;		/* iovec array to fill */
	size_t out_len;			/* total length selected */
	size_t max_len;			/* cap on the total selected length */
	unsigned short nr_iovs;		/* capacity of ->iovs */
	unsigned short mode;		/* KBUF_MODE_* flags */
	unsigned short buf_group;	/* buffer group to select from */
	unsigned short partial_map;	/* set if a buffer was mapped partially */
};
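
/*
 * Illustrative setup, not verbatim kernel code: callers commonly point
 * ->iovs at a small pre-sized array and let KBUF_MODE_EXPAND allocate a
 * larger one when more buffers are available:
 *
 *	struct iovec iovs[8];
 *	struct buf_sel_arg arg = {
 *		.iovs		= iovs,
 *		.nr_iovs	= ARRAY_SIZE(iovs),
 *		.max_len	= INT_MAX,
 *		.mode		= KBUF_MODE_EXPAND,
 *	};
 *
 *	ret = io_buffers_select(req, &arg, &sel, issue_flags);
 */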

/*
 * Buffer selection: io_buffer_select() picks a single provided buffer for
 * a request, while io_buffers_select() and io_buffers_peek() fill an
 * iovec array from a buffer ring; for ring-provided buffers the final
 * consumption is committed later via io_kbuf_commit(). See
 * io_uring/kbuf.c for the details.
 */
struct io_br_sel io_buffer_select(struct io_kiocb *req, size_t *len,
				  unsigned buf_group, unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      struct io_br_sel *sel, unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
		    struct io_br_sel *sel);
void io_destroy_buffers(struct io_ring_ctx *ctx);

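/*
 * Legacy/classic provided buffers: prep handlers for the
 * IORING_OP_PROVIDE_BUFFERS and IORING_OP_REMOVE_BUFFERS opcodes, with
 * io_manage_buffers_legacy() as the shared issue handler.
 */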
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags);

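/*
 * io_uring_register(2) plumbing for buffer rings: these back the
 * IORING_REGISTER_PBUF_RING, IORING_UNREGISTER_PBUF_RING and
 * IORING_REGISTER_PBUF_STATUS opcodes.
 */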
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_drop_legacy(struct io_kiocb *req);

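/*
 * Completion-side accounting: __io_put_kbufs() translates the buffers a
 * request consumed into CQE flags (IORING_CQE_F_BUFFER plus the buffer
 * ID), and io_kbuf_commit() advances the ring head past the consumed
 * entries; see io_uring/kbuf.c for details.
 */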
unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
			    int len, int nbufs);
bool io_kbuf_commit(struct io_kiocb *req,
		    struct io_buffer_list *bl, int len, int nr);

struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req,
					struct io_buffer_list *bl)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING; clearing the flag
	 * is enough to ensure that bl->head doesn't get incremented. The
	 * exception is partial IO, in which case bl->head should be
	 * advanced to monopolize the buffer.
	 */
	if (bl) {
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

/* true if the request wants buffer selection and hasn't picked one yet */
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

/*
 * Hand an unconsumed selected buffer back to its group so it can be
 * handed out again; a no-op if recycling was disallowed via
 * REQ_F_BL_NO_RECYCLE (set once part of the data has been transferred).
 */
static inline bool io_kbuf_recycle(struct io_kiocb *req, struct io_buffer_list *bl,
				   unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req, bl);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	return false;
}
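
/*
 * Illustrative retry-path usage, assuming struct io_br_sel exposes the
 * selected list as ->buf_list (a sketch, not verbatim kernel code): a
 * handler that cannot make progress hands the buffer back before retrying:
 *
 *	if (ret == -EAGAIN) {
 *		io_kbuf_recycle(req, sel.buf_list, issue_flags);
 *		return -EAGAIN;
 *	}
 */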

/* release a selected buffer; returns CQE flags identifying it, or 0 */
static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       struct io_buffer_list *bl)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, bl, len, 1);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					struct io_buffer_list *bl, int nbufs)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, bl, len, nbufs);
}
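
/*
 * Illustrative completion usage, assuming the io_br_sel and
 * io_req_set_res() helpers from io_uring/io_uring.h (a sketch, not
 * verbatim kernel code): the returned flags are folded into the CQE so
 * userspace learns which buffer was used:
 *
 *	cflags = io_put_kbuf(req, ret, sel.buf_list);
 *	io_req_set_res(req, ret, cflags);
 */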
#endif /* IOU_KBUF_H */