// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer FIFO
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 */

#include <sound/core.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "seq_fifo.h"
#include "seq_lock.h"


/* FIFO */

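/*
 * Typical lifecycle of a FIFO, sketched from the functions in this file.
 * The caller-side code below is illustrative only and is not taken from
 * the sequencer core:
 *
 *	struct snd_seq_fifo *f = snd_seq_fifo_new(poolsize);
 *	...
 *	snd_seq_fifo_event_in(f, &ev);			// producer: enqueue an event
 *	...
 *	struct snd_seq_event_cell *cell;
 *	err = snd_seq_fifo_cell_out(f, &cell, nonblock);	// consumer: dequeue
 *	...
 *	snd_seq_fifo_delete(&f);
 */
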
/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL) {
		kfree(f);
		return NULL;
	}
	if (snd_seq_pool_init(f->pool) < 0) {
		snd_seq_pool_delete(&f->pool);
		kfree(f);
		return NULL;
	}

	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;
}

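/* delete the fifo and release its resources; the fifo is drained and any
 * sleeping readers are woken up before the pool is torn down
 */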
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;

	if (f->pool)
		snd_seq_pool_mark_closing(f->pool);

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release resources...*/
	/*....................*/

	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	/* clear overflow flag */
	atomic_set(&f->overflow, 0);

	snd_use_lock_sync(&f->use_lock);
	guard(spinlock_irq)(&f->lock);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL) {
		snd_seq_cell_free(cell);
	}
}


/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	int err;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	guard(snd_seq_fifo)(f);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
	if (err < 0) {
		if ((err == -ENOMEM) || (err == -EAGAIN))
			atomic_inc(&f->overflow);
		return err;
	}

	/* append new cells to fifo */
	scoped_guard(spinlock_irqsave, &f->lock) {
		if (f->tail != NULL)
			f->tail->next = cell;
		f->tail = cell;
		if (f->head == NULL)
			f->head = cell;
		cell->next = NULL;
		f->cells++;
	}

	/* wakeup client */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	return 0; /* success */

}

/* dequeue cell from fifo */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	cell = f->head;
	if (cell) {
		f->head = cell->next;

		/* reset tail if this was the last element */
		if (f->tail == cell)
			f->tail = NULL;

		cell->next = NULL;
		f->cells--;
	}

	return cell;
}

/* dequeue cell from fifo and copy on user space */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_entry_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
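		/* blocking mode: sleep until a producer queues an event or a
		 * signal arrives; the spinlock is dropped while sleeping
		 */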
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		spin_unlock_irqrestore(&f->lock, flags);
		schedule();
		spin_lock_irqsave(&f->lock, flags);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}


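/* push an already dequeued cell back to the head of the fifo,
 * e.g. when the event could not be delivered to the reader
 */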
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
			       struct snd_seq_event_cell *cell)
{
	if (cell) {
		guard(spinlock_irqsave)(&f->lock);
		cell->next = f->head;
		f->head = cell;
		if (!f->tail)
			f->tail = cell;
		f->cells++;
	}
}


/* polling; return non-zero if queue is available */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
	guard(spinlock_irq)(&f->lock);
	return (f->cells > 0);
}

/* change the size of pool; all old events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	scoped_guard(spinlock_irq, &f->lock) {
		/* remember old pool */
		oldpool = f->pool;
		oldhead = f->head;
		/* exchange pools */
		f->pool = newpool;
		f->head = NULL;
		f->tail = NULL;
		f->cells = 0;
		/* NOTE: overflow flag is not cleared */
	}

	/* close the old pool and wait until all users are gone */
	snd_seq_pool_mark_closing(oldpool);
	snd_use_lock_sync(&f->use_lock);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}

/* get the number of unused cells safely */
int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
{
	if (!f)
		return 0;

	guard(snd_seq_fifo)(f);
	guard(spinlock_irqsave)(&f->lock);
	return snd_seq_unused_cells(f->pool);
}
|---|