// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mailbox: Common code for Mailbox controllers and users
 *
 * Copyright (C) 2013-2014 Linaro Ltd.
 * Author: Jassi Brar <jassisinghbrar@gmail.com>
 */

#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/spinlock.h>

#include "mailbox.h"

static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);

static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
        int idx;

        guard(spinlock_irqsave)(&chan->lock);

        /* See if there is any space left */
        if (chan->msg_count == MBOX_TX_QUEUE_LEN)
                return -ENOBUFS;

        idx = chan->msg_free;
        chan->msg_data[idx] = mssg;
        chan->msg_count++;

        if (idx == MBOX_TX_QUEUE_LEN - 1)
                chan->msg_free = 0;
        else
                chan->msg_free++;

        return idx;
}

static void msg_submit(struct mbox_chan *chan)
{
        unsigned count, idx;
        void *data;
        int err = -EBUSY;

        scoped_guard(spinlock_irqsave, &chan->lock) {
                if (!chan->msg_count || chan->active_req)
                        break;

                count = chan->msg_count;
                idx = chan->msg_free;
                if (idx >= count)
                        idx -= count;
                else
                        idx += MBOX_TX_QUEUE_LEN - count;

                data = chan->msg_data[idx];

                if (chan->cl->tx_prepare)
                        chan->cl->tx_prepare(chan->cl, data);
                /* Try to submit a message to the MBOX controller */
                err = chan->mbox->ops->send_data(chan, data);
                if (!err) {
                        chan->active_req = data;
                        chan->msg_count--;
                }
        }

        if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
                /* kick start the timer immediately to avoid delays */
                scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock)
                        hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
        }
}

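/*
 * Editor's note (worked example, not part of the original file): the TX queue
 * is a ring of MBOX_TX_QUEUE_LEN slots where msg_free points at the next free
 * slot and msg_count holds the number of queued messages, so the oldest
 * pending message sits msg_count slots behind msg_free, wrapping around.
 * For example, with MBOX_TX_QUEUE_LEN == 20, msg_free == 3 and msg_count == 5,
 * msg_submit() picks idx = 3 + 20 - 5 = 18; with msg_free == 7 and
 * msg_count == 5 it picks idx = 7 - 5 = 2.
 */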
static void tx_tick(struct mbox_chan *chan, int r)
{
        void *mssg;

        scoped_guard(spinlock_irqsave, &chan->lock) {
                mssg = chan->active_req;
                chan->active_req = NULL;
        }

        /* Submit next message */
        msg_submit(chan);

        if (!mssg)
                return;

        /* Notify the client */
        if (chan->cl->tx_done)
                chan->cl->tx_done(chan->cl, mssg, r);

        if (r != -ETIME && chan->cl->tx_block)
                complete(&chan->tx_complete);
}

static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
        struct mbox_controller *mbox =
                container_of(hrtimer, struct mbox_controller, poll_hrt);
        bool txdone, resched = false;
        int i;

        for (i = 0; i < mbox->num_chans; i++) {
                struct mbox_chan *chan = &mbox->chans[i];

                if (chan->active_req && chan->cl) {
                        txdone = chan->mbox->ops->last_tx_done(chan);
                        if (txdone)
                                tx_tick(chan, 0);
                        else
                                resched = true;
                }
        }

        if (resched) {
                scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) {
                        if (!hrtimer_is_queued(hrtimer))
                                hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
                }

                return HRTIMER_RESTART;
        }
        return HRTIMER_NORESTART;
}

/**
 * mbox_chan_received_data - A way for controller driver to push data
 *                           received from remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client specific message typecasted as void *
 *
 * After startup and before shutdown any data received on the chan
 * is passed on to the API via atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
        /* No buffering the received data */
        if (chan->cl->rx_callback)
                chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);

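/*
 * Illustrative sketch (editor's example, not part of this file): a controller
 * driver would typically call mbox_chan_received_data() from its RX interrupt
 * handler and only acknowledge the hardware afterwards, as required above.
 * The foo_* names, register offsets and the driver-private struct foo_mbox
 * (holding an __iomem base and an embedded struct mbox_controller) are
 * hypothetical; rx_callback runs synchronously in this atomic context.
 *
 *      static irqreturn_t foo_mbox_rx_irq(int irq, void *dev_id)
 *      {
 *              struct foo_mbox *priv = dev_id;
 *              u32 msg = readl(priv->base + FOO_MBOX_RX_DATA);
 *
 *              mbox_chan_received_data(&priv->controller.chans[0], &msg);
 *              writel(FOO_MBOX_RX_ACK, priv->base + FOO_MBOX_CTRL);
 *
 *              return IRQ_HANDLED;
 *      }
 */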
/**
 * mbox_chan_txdone - A way for controller driver to notify the
 *                    framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * The controller that has IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
        if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
                dev_err(chan->mbox->dev,
                        "Controller can't run the TX ticker\n");
                return;
        }

        tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);

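/*
 * Illustrative sketch (editor's example, not part of this file): a controller
 * that set txdone_irq would report completion from its TX-done interrupt
 * handler. The foo_* names and private structure are hypothetical.
 *
 *      static irqreturn_t foo_mbox_txdone_irq(int irq, void *dev_id)
 *      {
 *              struct foo_mbox *priv = dev_id;
 *
 *              mbox_chan_txdone(&priv->controller.chans[0], 0);
 *
 *              return IRQ_HANDLED;
 *      }
 */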
/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol had received some 'ACK' packet and it notifies
 * the API that the last packet was sent successfully. This only works
 * if the controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
        if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
                dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
                return;
        }

        tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);

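/*
 * Illustrative sketch (editor's example, not part of this file): a client
 * whose protocol carries its own ACK packet sets knows_txdone before
 * requesting the channel and then runs the ticker itself once that ACK
 * arrives. The request message name is hypothetical.
 *
 *      cl->knows_txdone = true;
 *      ...
 *      mbox_send_message(chan, &request);
 *
 *      (later, when the protocol-level ACK packet is received)
 *      mbox_client_txdone(chan, 0);
 */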
/**
 * mbox_client_peek_data - A way for client driver to pull data
 *                         received from remote by the controller.
 * @chan: Mailbox channel assigned to this client.
 *
 * A poke to controller driver for any received data.
 * The data is actually passed onto client via the
 * mbox_chan_received_data()
 * The call can be made from atomic context, so the controller's
 * implementation of peek_data() must not sleep.
 *
 * Return: True, if controller has, and is going to push after this,
 *          some data.
 *         False, if controller doesn't have any data to be read.
 */
bool mbox_client_peek_data(struct mbox_chan *chan)
{
        if (chan->mbox->ops->peek_data)
                return chan->mbox->ops->peek_data(chan);

        return false;
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);

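/*
 * Illustrative sketch (editor's example, not part of this file): because
 * peek_data() must not sleep, a client may poke the controller for pending
 * RX data even from atomic context; any data found is expected to be
 * delivered through mbox_chan_received_data(), i.e. via the client's
 * rx_callback.
 *
 *      if (mbox_client_peek_data(chan))
 *              dev_dbg(cl.dev, "controller has RX data pending\n");
 */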
/**
 * mbox_send_message -	For client to submit a message to be
 *				sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted.
 *
 * For client to submit data to the controller destined for a remote
 * processor. If the client had set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' millisecs
 * run out.
 *  In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The pointer to message should be preserved until it is sent
 * over the chan, i.e, tx_done() is made.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode).
 *	Negative value denotes failure.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
        int t;

        if (!chan || !chan->cl)
                return -EINVAL;

        t = add_to_rbuf(chan, mssg);
        if (t < 0) {
                dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
                return t;
        }

        msg_submit(chan);

        if (chan->cl->tx_block) {
                unsigned long wait;
                int ret;

                if (!chan->cl->tx_tout) /* wait forever */
                        wait = msecs_to_jiffies(3600000);
                else
                        wait = msecs_to_jiffies(chan->cl->tx_tout);

                ret = wait_for_completion_timeout(&chan->tx_complete, wait);
                if (ret == 0) {
                        t = -ETIME;
                        tx_tick(chan, t);
                }
        }

        return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);

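/*
 * Illustrative sketch (editor's example, not part of this file): a minimal
 * blocking client. The my_* names, the message layout, the channel index and
 * the 500 ms timeout are hypothetical; error handling is trimmed.
 *
 *      struct mbox_client cl = {
 *              .dev            = &pdev->dev,
 *              .tx_block       = true,
 *              .tx_tout        = 500,
 *              .rx_callback    = my_rx_callback,
 *      };
 *      struct mbox_chan *chan;
 *      u32 msg = MY_PING_CMD;
 *      int ret;
 *
 *      chan = mbox_request_channel(&cl, 0);
 *      if (IS_ERR(chan))
 *              return PTR_ERR(chan);
 *
 *      ret = mbox_send_message(chan, &msg);
 *      if (ret < 0)
 *              dev_err(&pdev->dev, "mailbox send failed: %d\n", ret);
 *
 *      mbox_free_channel(chan);
 */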
/**
 * mbox_flush - flush a mailbox channel
 * @chan: mailbox channel to flush
 * @timeout: time, in milliseconds, to allow the flush operation to succeed
 *
 * Mailbox controllers that need to work in atomic context can implement the
 * ->flush() callback to busy loop until a transmission has been completed.
 * The implementation must call mbox_chan_txdone() upon success. Clients can
 * call the mbox_flush() function at any time after mbox_send_message() to
 * flush the transmission. After the function returns success, the mailbox
 * transmission is guaranteed to have completed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
        int ret;

        if (!chan->mbox->ops->flush)
                return -ENOTSUPP;

        ret = chan->mbox->ops->flush(chan, timeout);
        if (ret < 0)
                tx_tick(chan, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(mbox_flush);

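/*
 * Illustrative sketch (editor's example, not part of this file): a client in
 * atomic context can queue a message and then busy-wait for it with
 * mbox_flush(), provided the controller implements ->flush(). The 100 ms
 * timeout is an arbitrary example value.
 *
 *      ret = mbox_send_message(chan, &msg);
 *      if (ret >= 0)
 *              ret = mbox_flush(chan, 100);
 */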
static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
        struct device *dev = cl->dev;
        int ret;

        if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
                dev_err(dev, "%s: mailbox not free\n", __func__);
                return -EBUSY;
        }

        scoped_guard(spinlock_irqsave, &chan->lock) {
                chan->msg_free = 0;
                chan->msg_count = 0;
                chan->active_req = NULL;
                chan->cl = cl;
                init_completion(&chan->tx_complete);

                if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
                        chan->txdone_method = TXDONE_BY_ACK;
        }

        if (chan->mbox->ops->startup) {
                ret = chan->mbox->ops->startup(chan);

                if (ret) {
                        dev_err(dev, "Unable to startup the chan (%d)\n", ret);
                        mbox_free_channel(chan);
                        return ret;
                }
        }

        return 0;
}

/**
 * mbox_bind_client - Request a mailbox channel.
 * @chan: The mailbox channel to bind the client to.
 * @cl: Identity of the client requesting the channel.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel.
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds reference to the client, so the mbox_client
 * structure shouldn't be modified until the mbox_free_channel returns.
 *
 * Return: 0 if the channel was assigned to the client successfully.
 *         <0 for request failure.
 */
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
        guard(mutex)(&con_mutex);

        return __mbox_bind_client(chan, cl);
}
EXPORT_SYMBOL_GPL(mbox_bind_client);

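/*
 * Illustrative sketch (editor's example, not part of this file): unlike
 * mbox_request_channel(), which resolves the channel through the consumer's
 * firmware node, mbox_bind_client() attaches a client to a channel pointer
 * the caller already holds, e.g. one handed over by platform glue code:
 *
 *      ret = mbox_bind_client(chan, &my_client);
 *      if (ret)
 *              return ret;
 */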
/**
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel.
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds reference to the client, so the mbox_client
 * structure shouldn't be modified until the mbox_free_channel returns.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *		ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
        struct fwnode_reference_args fwspec;
        struct fwnode_handle *fwnode;
        struct mbox_controller *mbox;
        struct of_phandle_args spec;
        struct mbox_chan *chan;
        struct device *dev;
        unsigned int i;
        int ret;

        dev = cl->dev;
        if (!dev) {
                pr_debug("No owner device\n");
                return ERR_PTR(-ENODEV);
        }

        fwnode = dev_fwnode(dev);
        if (!fwnode) {
                dev_dbg(dev, "No owner fwnode\n");
                return ERR_PTR(-ENODEV);
        }

        ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells",
                                                 0, index, &fwspec);
        if (ret) {
                dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes");
                return ERR_PTR(ret);
        }

        spec.np = to_of_node(fwspec.fwnode);
        spec.args_count = fwspec.nargs;
        for (i = 0; i < spec.args_count; i++)
                spec.args[i] = fwspec.args[i];

        scoped_guard(mutex, &con_mutex) {
                chan = ERR_PTR(-EPROBE_DEFER);
                list_for_each_entry(mbox, &mbox_cons, node) {
                        if (device_match_fwnode(mbox->dev, fwspec.fwnode)) {
                                if (mbox->fw_xlate) {
                                        chan = mbox->fw_xlate(mbox, &fwspec);
                                        if (!IS_ERR(chan))
                                                break;
                                } else if (mbox->of_xlate) {
                                        chan = mbox->of_xlate(mbox, &spec);
                                        if (!IS_ERR(chan))
                                                break;
                                }
                        }
                }

                fwnode_handle_put(fwspec.fwnode);

                if (IS_ERR(chan))
                        return chan;

                ret = __mbox_bind_client(chan, cl);
                if (ret)
                        chan = ERR_PTR(ret);
        }

        return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);

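/*
 * Illustrative sketch (editor's example, not part of this file): given a
 * (hypothetical) consumer node such as the one below, index 0 selects the
 * first phandle+specifier pair of the 'mboxes' property, i.e. <&mailbox 0>.
 *
 *      my-client {
 *              compatible = "vendor,my-client";
 *              mboxes = <&mailbox 0>, <&mailbox 1>;
 *              mbox-names = "tx", "rx";
 *      };
 *
 *      chan = mbox_request_channel(&cl, 0);
 */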
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
                                              const char *name)
{
        int index = device_property_match_string(cl->dev, "mbox-names", name);

        if (index < 0) {
                dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
                        __func__, name);
                return ERR_PTR(index);
        }
        return mbox_request_channel(cl, index);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);

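/*
 * Illustrative sketch (editor's example, not part of this file): with the
 * 'mbox-names' property from the previous example, the same channel can be
 * looked up by name instead of by index:
 *
 *      chan = mbox_request_channel_byname(&cl, "tx");
 */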
/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *			channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
        if (!chan || !chan->cl)
                return;

        if (chan->mbox->ops->shutdown)
                chan->mbox->ops->shutdown(chan);

        /* The queued TX requests are simply aborted, no callbacks are made */
        scoped_guard(spinlock_irqsave, &chan->lock) {
                chan->cl = NULL;
                chan->active_req = NULL;
                if (chan->txdone_method == TXDONE_BY_ACK)
                        chan->txdone_method = TXDONE_BY_POLL;
        }

        module_put(chan->mbox->dev->driver->owner);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);

static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
                                             const struct fwnode_reference_args *sp)
{
        int ind = sp->args[0];

        if (ind >= mbox->num_chans)
                return ERR_PTR(-EINVAL);

        return &mbox->chans[ind];
}

/**
 * mbox_controller_register - Register the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 *
 * The controller driver registers its communication channels
 */
int mbox_controller_register(struct mbox_controller *mbox)
{
        int i, txdone;

        /* Sanity check */
        if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
                return -EINVAL;

        if (mbox->txdone_irq)
                txdone = TXDONE_BY_IRQ;
        else if (mbox->txdone_poll)
                txdone = TXDONE_BY_POLL;
        else /* It has to be ACK then */
                txdone = TXDONE_BY_ACK;

        if (txdone == TXDONE_BY_POLL) {
                if (!mbox->ops->last_tx_done) {
                        dev_err(mbox->dev, "last_tx_done method is absent\n");
                        return -EINVAL;
                }

                hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                spin_lock_init(&mbox->poll_hrt_lock);
        }

        for (i = 0; i < mbox->num_chans; i++) {
                struct mbox_chan *chan = &mbox->chans[i];

                chan->cl = NULL;
                chan->mbox = mbox;
                chan->txdone_method = txdone;
                spin_lock_init(&chan->lock);
        }

        if (!mbox->fw_xlate && !mbox->of_xlate)
                mbox->fw_xlate = fw_mbox_index_xlate;

        scoped_guard(mutex, &con_mutex)
                list_add_tail(&mbox->node, &mbox_cons);

        return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);

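/*
 * Illustrative sketch (editor's example, not part of this file): a polling
 * controller fills in its channels, ops and TX-done discipline before
 * registering. The foo_* callbacks, the 4-entry chans[] array in the
 * driver-private struct and the 5 ms poll period are hypothetical.
 *
 *      static const struct mbox_chan_ops foo_mbox_ops = {
 *              .send_data      = foo_mbox_send_data,
 *              .last_tx_done   = foo_mbox_last_tx_done,
 *      };
 *
 *      priv->controller.dev            = &pdev->dev;
 *      priv->controller.ops            = &foo_mbox_ops;
 *      priv->controller.chans          = priv->chans;
 *      priv->controller.num_chans      = 4;
 *      priv->controller.txdone_poll    = true;
 *      priv->controller.txpoll_period  = 5;
 *      ret = mbox_controller_register(&priv->controller);
 */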
/**
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 */
void mbox_controller_unregister(struct mbox_controller *mbox)
{
        int i;

        if (!mbox)
                return;

        scoped_guard(mutex, &con_mutex) {
                list_del(&mbox->node);

                for (i = 0; i < mbox->num_chans; i++)
                        mbox_free_channel(&mbox->chans[i]);

                if (mbox->txdone_poll)
                        hrtimer_cancel(&mbox->poll_hrt);
        }
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);

static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
        struct mbox_controller **mbox = res;

        mbox_controller_unregister(*mbox);
}

/**
 * devm_mbox_controller_register() - managed mbox_controller_register()
 * @dev: device owning the mailbox controller being registered
 * @mbox: mailbox controller being registered
 *
 * This function adds a device-managed resource that will make sure that the
 * mailbox controller, which is registered using mbox_controller_register()
 * as part of this function, will be unregistered along with the rest of
 * device-managed resources upon driver probe failure or driver removal.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_mbox_controller_register(struct device *dev,
                                  struct mbox_controller *mbox)
{
        struct mbox_controller **ptr;
        int err;

        ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        err = mbox_controller_register(mbox);
        if (err < 0) {
                devres_free(ptr);
                return err;
        }

        devres_add(dev, ptr);
        *ptr = mbox;

        return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);

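/*
 * Illustrative sketch (editor's example, not part of this file): the devres
 * variant is the usual choice from a platform driver's probe path, since the
 * controller is then unregistered automatically on unbind or probe failure.
 * The foo_* names are hypothetical.
 *
 *      static int foo_mbox_probe(struct platform_device *pdev)
 *      {
 *              struct foo_mbox *priv;
 *
 *              priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *              if (!priv)
 *                      return -ENOMEM;
 *
 *              (fill in priv->controller as in the example above)
 *
 *              return devm_mbox_controller_register(&pdev->dev, &priv->controller);
 *      }
 */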