#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/io_uring.h>

#include "io_uring.h"
#include "notif.h"
#include "rsrc.h"

static const struct ubuf_info_ops io_ubuf_ops;

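/*
 * Complete a notification and every notification linked after it: flag the
 * CQE if the payload was copied rather than sent zero-copy, return any
 * accounted memory, and post one CQE per entry in the chain.
 */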
static void io_notif_tw_complete(struct io_kiocb *notif, io_tw_token_t tw)
{
	struct io_notif_data *nd = io_notif_to_data(notif);
	struct io_ring_ctx *ctx = notif->ctx;

	lockdep_assert_held(&ctx->uring_lock);

	do {
		notif = cmd_to_io_kiocb(nd);

		if (WARN_ON_ONCE(ctx != notif->ctx))
			return;
		lockdep_assert(refcount_read(&nd->uarg.refcnt) == 0);

		if (unlikely(nd->zc_report) && (nd->zc_copied || !nd->zc_used))
			notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;

		if (nd->account_pages && notif->ctx->user) {
			__io_unaccount_mem(notif->ctx->user, nd->account_pages);
			nd->account_pages = 0;
		}

		nd = nd->next;
		io_req_task_complete(notif, tw);
	} while (nd);
}

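/*
 * ubuf_info ->complete callback, invoked by the network stack once it is
 * done with an skb referencing this notification. Records whether the data
 * actually went out zero-copy, and on the final reference drop either
 * defers to the head of a linked chain or queues completion via task_work.
 */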
void io_tx_ubuf_complete(struct sk_buff *skb, struct ubuf_info *uarg,
			 bool success)
{
	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
	struct io_kiocb *notif = cmd_to_io_kiocb(nd);
	unsigned tw_flags;

	if (nd->zc_report) {
		if (success && !nd->zc_used && skb)
			WRITE_ONCE(nd->zc_used, true);
		else if (!success && !nd->zc_copied)
			WRITE_ONCE(nd->zc_copied, true);
	}

	if (!refcount_dec_and_test(&uarg->refcnt))
		return;

	if (nd->head != nd) {
		io_tx_ubuf_complete(skb, &nd->head->uarg, success);
		return;
	}

	tw_flags = nd->next ? 0 : IOU_F_TWQ_LAZY_WAKE;
	notif->io_task_work.func = io_notif_tw_complete;
	__io_req_task_work_add(notif, tw_flags);
}

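/*
 * ubuf_info ->link_skb callback: attach this notification to @skb. If the
 * skb already carries an io_uring notification, chain the two together so
 * they complete as one; return -EEXIST when linking is not possible and
 * the caller should allocate a fresh skb instead.
 */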
static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
{
	struct io_notif_data *nd, *prev_nd;
	struct io_kiocb *prev_notif, *notif;
	struct ubuf_info *prev_uarg = skb_zcopy(skb);

	nd = container_of(uarg, struct io_notif_data, uarg);
	notif = cmd_to_io_kiocb(nd);

	if (!prev_uarg) {
		net_zcopy_get(&nd->uarg);
		skb_zcopy_init(skb, &nd->uarg);
		return 0;
	}
	/* handle it separately as we can't link a notif to itself */
	if (unlikely(prev_uarg == &nd->uarg))
		return 0;
	/* we can't join two links together, just request a fresh skb */
	if (unlikely(nd->head != nd || nd->next))
		return -EEXIST;
	/* don't mix zc providers */
	if (unlikely(prev_uarg->ops != &io_ubuf_ops))
		return -EEXIST;

	prev_nd = container_of(prev_uarg, struct io_notif_data, uarg);
	prev_notif = cmd_to_io_kiocb(prev_nd);

	/* make sure all notifications can be finished in the same task_work */
	if (unlikely(notif->ctx != prev_notif->ctx ||
		     notif->tctx != prev_notif->tctx))
		return -EEXIST;

	nd->head = prev_nd->head;
	nd->next = prev_nd->next;
	prev_nd->next = nd;
	net_zcopy_get(&nd->head->uarg);
	return 0;
}

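/* ubuf_info callbacks backing io_uring zero-copy send notifications */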
static const struct ubuf_info_ops io_ubuf_ops = {
	.complete = io_tx_ubuf_complete,
	.link_skb = io_link_skb,
};

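/*
 * Allocate and initialise a notification request: a hidden NOP request
 * carrying the ubuf_info that the network stack completes once zero-copy
 * transmission of the associated data has finished.
 */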
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_kiocb *notif;
	struct io_notif_data *nd;

	if (unlikely(!io_alloc_req(ctx, &notif)))
		return NULL;
	notif->ctx = ctx;
	notif->opcode = IORING_OP_NOP;
	notif->flags = 0;
	notif->file = NULL;
	notif->tctx = current->io_uring;
	io_get_task_refs(1);
	notif->file_node = NULL;
	notif->buf_node = NULL;

	nd = io_notif_to_data(notif);
	nd->zc_report = false;
	nd->account_pages = 0;
	nd->next = NULL;
	nd->head = nd;

	nd->uarg.flags = IO_NOTIF_UBUF_FLAGS;
	nd->uarg.ops = &io_ubuf_ops;
	refcount_set(&nd->uarg.refcnt, 1);
	return notif;
}