1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2012-2023, Intel Corporation. All rights reserved.
4 * Intel Management Engine Interface (Intel MEI) Linux driver
5 */
6
7#include <linux/module.h>
8#include <linux/device.h>
9#include <linux/kernel.h>
10#include <linux/sched/signal.h>
11#include <linux/init.h>
12#include <linux/errno.h>
13#include <linux/slab.h>
14#include <linux/mutex.h>
15#include <linux/interrupt.h>
16#include <linux/scatterlist.h>
17#include <linux/mei_cl_bus.h>
18
19#include "mei_dev.h"
20#include "client.h"
21
22#define to_mei_cl_driver(d) container_of_const(d, struct mei_cl_driver, driver)
23
24/**
25 * __mei_cl_send - internal client send (write)
26 *
27 * @cl: host client
28 * @buf: buffer to send
29 * @length: buffer length
30 * @vtag: virtual tag
31 * @mode: sending mode
32 *
33 * Return: written size bytes or < 0 on error
34 */
35ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
36 unsigned int mode)
37{
38 return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
39}
40
41/**
42 * __mei_cl_send_timeout - internal client send (write)
43 *
44 * @cl: host client
45 * @buf: buffer to send
46 * @length: buffer length
47 * @vtag: virtual tag
48 * @mode: sending mode
49 * @timeout: send timeout in milliseconds.
50 * effective only for blocking writes: the MEI_CL_IO_TX_BLOCKING mode bit is set.
51 * set timeout to the MAX_SCHEDULE_TIMEOUT to maixum allowed wait.
52 *
53 * Return: written size bytes or < 0 on error
54 */
55ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
56 unsigned int mode, unsigned long timeout)
57{
58 struct mei_device *bus;
59 struct mei_cl_cb *cb;
60 ssize_t rets;
61
62 if (WARN_ON(!cl || !cl->dev))
63 return -ENODEV;
64
65 bus = cl->dev;
66
67 mutex_lock(lock: &bus->device_lock);
68 if (bus->dev_state != MEI_DEV_ENABLED &&
69 bus->dev_state != MEI_DEV_POWERING_DOWN) {
70 rets = -ENODEV;
71 goto out;
72 }
73
74 if (!mei_cl_is_connected(cl)) {
75 rets = -ENODEV;
76 goto out;
77 }
78
79 /* Check if we have an ME client device */
80 if (!mei_me_cl_is_active(me_cl: cl->me_cl)) {
81 rets = -ENOTTY;
82 goto out;
83 }
84
85 if (vtag) {
86 /* Check if vtag is supported by client */
87 rets = mei_cl_vt_support_check(cl);
88 if (rets)
89 goto out;
90 }
91
92 if (length > mei_cl_mtu(cl)) {
93 rets = -EFBIG;
94 goto out;
95 }
96
97 while (cl->tx_cb_queued >= bus->tx_queue_limit) {
98 mutex_unlock(lock: &bus->device_lock);
99 rets = wait_event_interruptible(cl->tx_wait,
100 cl->writing_state == MEI_WRITE_COMPLETE ||
101 (!mei_cl_is_connected(cl)));
102 mutex_lock(lock: &bus->device_lock);
103 if (rets) {
104 if (signal_pending(current))
105 rets = -EINTR;
106 goto out;
107 }
108 if (!mei_cl_is_connected(cl)) {
109 rets = -ENODEV;
110 goto out;
111 }
112 }
113
114 cb = mei_cl_alloc_cb(cl, length, type: MEI_FOP_WRITE, NULL);
115 if (!cb) {
116 rets = -ENOMEM;
117 goto out;
118 }
119 cb->vtag = vtag;
120
121 cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
122 cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
123 memcpy(to: cb->buf.data, from: buf, len: length);
124 /* hack we point data to header */
125 if (mode & MEI_CL_IO_SGL) {
126 cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
127 cb->buf.data = NULL;
128 cb->buf.size = 0;
129 }
130
131 rets = mei_cl_write(cl, cb, timeout);
132
133 if (mode & MEI_CL_IO_SGL && rets == 0)
134 rets = length;
135
136out:
137 mutex_unlock(lock: &bus->device_lock);
138
139 return rets;
140}
141
142/**
143 * __mei_cl_recv - internal client receive (read)
144 *
145 * @cl: host client
146 * @buf: buffer to receive
147 * @length: buffer length
148 * @vtag: virtual tag
149 * @mode: io mode
150 * @timeout: recv timeout, 0 for infinite timeout
151 *
152 * Return: read size in bytes of < 0 on error
153 */
154ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
155 unsigned int mode, unsigned long timeout)
156{
157 struct mei_device *bus;
158 struct mei_cl_cb *cb;
159 size_t r_length;
160 ssize_t rets;
161 bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);
162
163 if (WARN_ON(!cl || !cl->dev))
164 return -ENODEV;
165
166 bus = cl->dev;
167
168 mutex_lock(lock: &bus->device_lock);
169 if (bus->dev_state != MEI_DEV_ENABLED &&
170 bus->dev_state != MEI_DEV_POWERING_DOWN) {
171 rets = -ENODEV;
172 goto out;
173 }
174
175 cb = mei_cl_read_cb(cl, NULL);
176 if (cb)
177 goto copy;
178
179 rets = mei_cl_read_start(cl, length, NULL);
180 if (rets && rets != -EBUSY)
181 goto out;
182
183 if (nonblock) {
184 rets = -EAGAIN;
185 goto out;
186 }
187
188 /* wait on event only if there is no other waiter */
189 /* synchronized under device mutex */
190 if (!waitqueue_active(wq_head: &cl->rx_wait)) {
191
192 mutex_unlock(lock: &bus->device_lock);
193
194 if (timeout) {
195 rets = wait_event_interruptible_timeout
196 (cl->rx_wait,
197 mei_cl_read_cb(cl, NULL) ||
198 (!mei_cl_is_connected(cl)),
199 msecs_to_jiffies(timeout));
200 if (rets == 0)
201 return -ETIME;
202 if (rets < 0) {
203 if (signal_pending(current))
204 return -EINTR;
205 return -ERESTARTSYS;
206 }
207 } else {
208 if (wait_event_interruptible
209 (cl->rx_wait,
210 mei_cl_read_cb(cl, NULL) ||
211 (!mei_cl_is_connected(cl)))) {
212 if (signal_pending(current))
213 return -EINTR;
214 return -ERESTARTSYS;
215 }
216 }
217
218 mutex_lock(lock: &bus->device_lock);
219
220 if (!mei_cl_is_connected(cl)) {
221 rets = -ENODEV;
222 goto out;
223 }
224 }
225
226 cb = mei_cl_read_cb(cl, NULL);
227 if (!cb) {
228 rets = 0;
229 goto out;
230 }
231
232copy:
233 if (cb->status) {
234 rets = cb->status;
235 goto free;
236 }
237
238 /* for the GSC type - copy the extended header to the buffer */
239 if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
240 r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
241 memcpy(to: buf, from: cb->ext_hdr, len: r_length);
242 } else {
243 r_length = min_t(size_t, length, cb->buf_idx);
244 memcpy(to: buf, from: cb->buf.data, len: r_length);
245 }
246 rets = r_length;
247
248 if (vtag)
249 *vtag = cb->vtag;
250
251free:
252 mei_cl_del_rd_completed(cl, cb);
253out:
254 mutex_unlock(lock: &bus->device_lock);
255
256 return rets;
257}
258
259/**
260 * mei_cldev_send_vtag - me device send with vtag (write)
261 *
262 * @cldev: me client device
263 * @buf: buffer to send
264 * @length: buffer length
265 * @vtag: virtual tag
266 *
267 * Return:
268 * * written size in bytes
269 * * < 0 on error
270 */
271
272ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
273 size_t length, u8 vtag)
274{
275 struct mei_cl *cl = cldev->cl;
276
277 return __mei_cl_send(cl, buf, length, vtag, mode: MEI_CL_IO_TX_BLOCKING);
278}
279EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
280
281/**
282 * mei_cldev_send_vtag_timeout - me device send with vtag and timeout (write)
283 *
284 * @cldev: me client device
285 * @buf: buffer to send
286 * @length: buffer length
287 * @vtag: virtual tag
288 * @timeout: send timeout in milliseconds, 0 for infinite timeout
289 *
290 * Return:
291 * * written size in bytes
292 * * < 0 on error
293 */
294
295ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
296 size_t length, u8 vtag, unsigned long timeout)
297{
298 struct mei_cl *cl = cldev->cl;
299
300 return __mei_cl_send_timeout(cl, buf, length, vtag, mode: MEI_CL_IO_TX_BLOCKING, timeout);
301}
302EXPORT_SYMBOL_GPL(mei_cldev_send_vtag_timeout);
303
304/**
305 * mei_cldev_recv_vtag - client receive with vtag (read)
306 *
307 * @cldev: me client device
308 * @buf: buffer to receive
309 * @length: buffer length
310 * @vtag: virtual tag
311 *
312 * Return:
313 * * read size in bytes
314 * * < 0 on error
315 */
316
317ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
318 u8 *vtag)
319{
320 struct mei_cl *cl = cldev->cl;
321
322 return __mei_cl_recv(cl, buf, length, vtag, mode: 0, timeout: 0);
323}
324EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
325
326/**
327 * mei_cldev_recv_timeout - client receive with timeout (read)
328 *
329 * @cldev: me client device
330 * @buf: buffer to receive
331 * @length: buffer length
332 * @timeout: send timeout in milliseconds, 0 for infinite timeout
333 *
334 * Return:
335 * * read size in bytes
336 * * < 0 on error
337 */
338ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
339 unsigned long timeout)
340{
341 return mei_cldev_recv_vtag_timeout(cldev, buf, length, NULL, timeout);
342}
343EXPORT_SYMBOL_GPL(mei_cldev_recv_timeout);
344
345/**
346 * mei_cldev_recv_vtag_timeout - client receive with vtag (read)
347 *
348 * @cldev: me client device
349 * @buf: buffer to receive
350 * @length: buffer length
351 * @vtag: virtual tag
352 * @timeout: recv timeout in milliseconds, 0 for infinite timeout
353 *
354 * Return:
355 * * read size in bytes
356 * * < 0 on error
357 */
358
359ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
360 u8 *vtag, unsigned long timeout)
361{
362 struct mei_cl *cl = cldev->cl;
363
364 return __mei_cl_recv(cl, buf, length, vtag, mode: 0, timeout);
365}
366EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag_timeout);
367
368/**
369 * mei_cldev_send - me device send (write)
370 *
371 * @cldev: me client device
372 * @buf: buffer to send
373 * @length: buffer length
374 *
375 * Return:
376 * * written size in bytes
377 * * < 0 on error
378 */
379ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
380{
381 return mei_cldev_send_vtag(cldev, buf, length, 0);
382}
383EXPORT_SYMBOL_GPL(mei_cldev_send);
384
385/**
386 * mei_cldev_send_timeout - me device send with timeout (write)
387 *
388 * @cldev: me client device
389 * @buf: buffer to send
390 * @length: buffer length
391 * @timeout: send timeout in milliseconds, 0 for infinite timeout
392 *
393 * Return:
394 * * written size in bytes
395 * * < 0 on error
396 */
397ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf, size_t length,
398 unsigned long timeout)
399{
400 return mei_cldev_send_vtag_timeout(cldev, buf, length, 0, timeout);
401}
402EXPORT_SYMBOL_GPL(mei_cldev_send_timeout);
403
404/**
405 * mei_cldev_recv - client receive (read)
406 *
407 * @cldev: me client device
408 * @buf: buffer to receive
409 * @length: buffer length
410 *
411 * Return: read size in bytes of < 0 on error
412 */
413ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
414{
415 return mei_cldev_recv_vtag(cldev, buf, length, NULL);
416}
417EXPORT_SYMBOL_GPL(mei_cldev_recv);
418
419/**
420 * mei_cl_bus_rx_work - dispatch rx event for a bus device
421 *
422 * @work: work
423 */
424static void mei_cl_bus_rx_work(struct work_struct *work)
425{
426 struct mei_cl_device *cldev;
427 struct mei_device *bus;
428
429 cldev = container_of(work, struct mei_cl_device, rx_work);
430
431 bus = cldev->bus;
432
433 if (cldev->rx_cb)
434 cldev->rx_cb(cldev);
435
436 mutex_lock(lock: &bus->device_lock);
437 if (mei_cl_is_connected(cl: cldev->cl))
438 mei_cl_read_start(cl: cldev->cl, length: mei_cl_mtu(cl: cldev->cl), NULL);
439 mutex_unlock(lock: &bus->device_lock);
440}
441
442/**
443 * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
444 *
445 * @work: work
446 */
447static void mei_cl_bus_notif_work(struct work_struct *work)
448{
449 struct mei_cl_device *cldev;
450
451 cldev = container_of(work, struct mei_cl_device, notif_work);
452
453 if (cldev->notif_cb)
454 cldev->notif_cb(cldev);
455}
456
457/**
458 * mei_cl_bus_notify_event - schedule notify cb on bus client
459 *
460 * @cl: host client
461 *
462 * Return: true if event was scheduled
463 * false if the client is not waiting for event
464 */
465bool mei_cl_bus_notify_event(struct mei_cl *cl)
466{
467 struct mei_cl_device *cldev = cl->cldev;
468
469 if (!cldev || !cldev->notif_cb)
470 return false;
471
472 if (!cl->notify_ev)
473 return false;
474
475 schedule_work(work: &cldev->notif_work);
476
477 cl->notify_ev = false;
478
479 return true;
480}
481
482/**
483 * mei_cl_bus_rx_event - schedule rx event
484 *
485 * @cl: host client
486 *
487 * Return: true if event was scheduled
488 * false if the client is not waiting for event
489 */
490bool mei_cl_bus_rx_event(struct mei_cl *cl)
491{
492 struct mei_cl_device *cldev = cl->cldev;
493
494 if (!cldev || !cldev->rx_cb)
495 return false;
496
497 schedule_work(work: &cldev->rx_work);
498
499 return true;
500}
501
502/**
503 * mei_cldev_register_rx_cb - register Rx event callback
504 *
505 * @cldev: me client devices
506 * @rx_cb: callback function
507 *
508 * Return: 0 on success
509 * -EALREADY if an callback is already registered
510 * <0 on other errors
511 */
512int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
513{
514 struct mei_device *bus = cldev->bus;
515 int ret;
516
517 if (!rx_cb)
518 return -EINVAL;
519 if (cldev->rx_cb)
520 return -EALREADY;
521
522 cldev->rx_cb = rx_cb;
523 INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
524
525 mutex_lock(lock: &bus->device_lock);
526 if (mei_cl_is_connected(cl: cldev->cl))
527 ret = mei_cl_read_start(cl: cldev->cl, length: mei_cl_mtu(cl: cldev->cl), NULL);
528 else
529 ret = -ENODEV;
530 mutex_unlock(lock: &bus->device_lock);
531 if (ret && ret != -EBUSY) {
532 cancel_work_sync(work: &cldev->rx_work);
533 cldev->rx_cb = NULL;
534 return ret;
535 }
536
537 return 0;
538}
539EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
540
541/**
542 * mei_cldev_register_notif_cb - register FW notification event callback
543 *
544 * @cldev: me client devices
545 * @notif_cb: callback function
546 *
547 * Return: 0 on success
548 * -EALREADY if an callback is already registered
549 * <0 on other errors
550 */
551int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
552 mei_cldev_cb_t notif_cb)
553{
554 struct mei_device *bus = cldev->bus;
555 int ret;
556
557 if (!notif_cb)
558 return -EINVAL;
559
560 if (cldev->notif_cb)
561 return -EALREADY;
562
563 cldev->notif_cb = notif_cb;
564 INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
565
566 mutex_lock(lock: &bus->device_lock);
567 ret = mei_cl_notify_request(cl: cldev->cl, NULL, request: 1);
568 mutex_unlock(lock: &bus->device_lock);
569 if (ret) {
570 cancel_work_sync(work: &cldev->notif_work);
571 cldev->notif_cb = NULL;
572 return ret;
573 }
574
575 return 0;
576}
577EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
578
579/**
580 * mei_cldev_get_drvdata - driver data getter
581 *
582 * @cldev: mei client device
583 *
584 * Return: driver private data
585 */
586void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
587{
588 return dev_get_drvdata(dev: &cldev->dev);
589}
590EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
591
592/**
593 * mei_cldev_set_drvdata - driver data setter
594 *
595 * @cldev: mei client device
596 * @data: data to store
597 */
598void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
599{
600 dev_set_drvdata(dev: &cldev->dev, data);
601}
602EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
603
604/**
605 * mei_cldev_ver - return protocol version of the underlying me client
606 *
607 * @cldev: mei client device
608 *
609 * Return: me client protocol version
610 */
611u8 mei_cldev_ver(const struct mei_cl_device *cldev)
612{
613 return mei_me_cl_ver(me_cl: cldev->me_cl);
614}
615EXPORT_SYMBOL_GPL(mei_cldev_ver);
616
617/**
618 * mei_cldev_mtu - max message that client can send and receive
619 *
620 * @cldev: mei client device
621 *
622 * Return: mtu or 0 if client is not connected
623 */
624size_t mei_cldev_mtu(const struct mei_cl_device *cldev)
625{
626 return mei_cl_mtu(cl: cldev->cl);
627}
628EXPORT_SYMBOL_GPL(mei_cldev_mtu);
629
630/**
631 * mei_cldev_enabled - check whether the device is enabled
632 *
633 * @cldev: mei client device
634 *
635 * Return: true if me client is initialized and connected
636 */
637bool mei_cldev_enabled(const struct mei_cl_device *cldev)
638{
639 return mei_cl_is_connected(cl: cldev->cl);
640}
641EXPORT_SYMBOL_GPL(mei_cldev_enabled);
642
643/**
644 * mei_cl_bus_module_get - acquire module of the underlying
645 * hw driver.
646 *
647 * @cldev: mei client device
648 *
649 * Return: true on success; false if the module was removed.
650 */
651static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
652{
653 return try_module_get(module: cldev->bus->parent->driver->owner);
654}
655
656/**
657 * mei_cl_bus_module_put - release the underlying hw module.
658 *
659 * @cldev: mei client device
660 */
661static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
662{
663 module_put(module: cldev->bus->parent->driver->owner);
664}
665
666/**
667 * mei_cl_bus_vtag - get bus vtag entry wrapper
668 * The tag for bus client is always first.
669 *
670 * @cl: host client
671 *
672 * Return: bus vtag or NULL
673 */
674static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
675{
676 return list_first_entry_or_null(&cl->vtag_map,
677 struct mei_cl_vtag, list);
678}
679
680/**
681 * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
682 *
683 * @cldev: me client device
684 *
685 * Return:
686 * * 0 on success
687 * * -ENOMEM if memory allocation failed
688 */
689static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
690{
691 struct mei_cl *cl = cldev->cl;
692 struct mei_cl_vtag *cl_vtag;
693
694 /*
695 * Bail out if the client does not supports vtags
696 * or has already allocated one
697 */
698 if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
699 return 0;
700
701 cl_vtag = mei_cl_vtag_alloc(NULL, vtag: 0);
702 if (IS_ERR(ptr: cl_vtag))
703 return -ENOMEM;
704
705 list_add_tail(new: &cl_vtag->list, head: &cl->vtag_map);
706
707 return 0;
708}
709
710/**
711 * mei_cl_bus_vtag_free - remove the bus entry from vtag map
712 *
713 * @cldev: me client device
714 */
715static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
716{
717 struct mei_cl *cl = cldev->cl;
718 struct mei_cl_vtag *cl_vtag;
719
720 cl_vtag = mei_cl_bus_vtag(cl);
721 if (!cl_vtag)
722 return;
723
724 list_del(entry: &cl_vtag->list);
725 kfree(objp: cl_vtag);
726}
727
728void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
729{
730 struct mei_device *bus;
731 struct mei_cl *cl;
732 int ret;
733
734 if (!cldev || !buffer_id || !size)
735 return ERR_PTR(error: -EINVAL);
736
737 if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
738 dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
739 MEI_FW_PAGE_SIZE);
740 return ERR_PTR(error: -EINVAL);
741 }
742
743 cl = cldev->cl;
744 bus = cldev->bus;
745
746 mutex_lock(lock: &bus->device_lock);
747 if (cl->state == MEI_FILE_UNINITIALIZED) {
748 ret = mei_cl_link(cl);
749 if (ret)
750 goto notlinked;
751 /* update pointers */
752 cl->cldev = cldev;
753 }
754
755 ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
756 if (ret)
757 mei_cl_unlink(cl);
758notlinked:
759 mutex_unlock(lock: &bus->device_lock);
760 if (ret)
761 return ERR_PTR(error: ret);
762 return cl->dma.vaddr;
763}
764EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
765
766int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
767{
768 struct mei_device *bus;
769 struct mei_cl *cl;
770 int ret;
771
772 if (!cldev)
773 return -EINVAL;
774
775 cl = cldev->cl;
776 bus = cldev->bus;
777
778 mutex_lock(lock: &bus->device_lock);
779 ret = mei_cl_dma_unmap(cl, NULL);
780
781 mei_cl_flush_queues(cl, NULL);
782 mei_cl_unlink(cl);
783 mutex_unlock(lock: &bus->device_lock);
784 return ret;
785}
786EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
787
788/**
789 * mei_cldev_enable - enable me client device
790 * create connection with me client
791 *
792 * @cldev: me client device
793 *
794 * Return: 0 on success and < 0 on error
795 */
796int mei_cldev_enable(struct mei_cl_device *cldev)
797{
798 struct mei_device *bus = cldev->bus;
799 struct mei_cl *cl;
800 int ret;
801
802 cl = cldev->cl;
803
804 mutex_lock(lock: &bus->device_lock);
805 if (cl->state == MEI_FILE_UNINITIALIZED) {
806 ret = mei_cl_link(cl);
807 if (ret)
808 goto notlinked;
809 /* update pointers */
810 cl->cldev = cldev;
811 }
812
813 if (mei_cl_is_connected(cl)) {
814 ret = 0;
815 goto out;
816 }
817
818 if (!mei_me_cl_is_active(me_cl: cldev->me_cl)) {
819 dev_err(&cldev->dev, "me client is not active\n");
820 ret = -ENOTTY;
821 goto out;
822 }
823
824 ret = mei_cl_bus_vtag_alloc(cldev);
825 if (ret)
826 goto out;
827
828 ret = mei_cl_connect(cl, me_cl: cldev->me_cl, NULL);
829 if (ret < 0) {
830 dev_dbg(&cldev->dev, "cannot connect\n");
831 mei_cl_bus_vtag_free(cldev);
832 }
833
834out:
835 if (ret)
836 mei_cl_unlink(cl);
837notlinked:
838 mutex_unlock(lock: &bus->device_lock);
839
840 return ret;
841}
842EXPORT_SYMBOL_GPL(mei_cldev_enable);
843
844/**
845 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
846 * callbacks.
847 *
848 * @cldev: client device
849 */
850static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
851{
852 if (cldev->rx_cb) {
853 cancel_work_sync(work: &cldev->rx_work);
854 cldev->rx_cb = NULL;
855 }
856
857 if (cldev->notif_cb) {
858 cancel_work_sync(work: &cldev->notif_work);
859 cldev->notif_cb = NULL;
860 }
861}
862
863/**
864 * mei_cldev_disable - disable me client device
865 * disconnect form the me client
866 *
867 * @cldev: me client device
868 *
869 * Return: 0 on success and < 0 on error
870 */
871int mei_cldev_disable(struct mei_cl_device *cldev)
872{
873 struct mei_device *bus;
874 struct mei_cl *cl;
875 int err;
876
877 if (!cldev)
878 return -ENODEV;
879
880 cl = cldev->cl;
881
882 bus = cldev->bus;
883
884 mei_cldev_unregister_callbacks(cldev);
885
886 mutex_lock(lock: &bus->device_lock);
887
888 mei_cl_bus_vtag_free(cldev);
889
890 if (!mei_cl_is_connected(cl)) {
891 dev_dbg(&cldev->dev, "Already disconnected\n");
892 err = 0;
893 goto out;
894 }
895
896 err = mei_cl_disconnect(cl);
897 if (err < 0)
898 dev_err(&cldev->dev, "Could not disconnect from the ME client\n");
899
900out:
901 /* Flush queues and remove any pending read unless we have mapped DMA */
902 if (!cl->dma_mapped) {
903 mei_cl_flush_queues(cl, NULL);
904 mei_cl_unlink(cl);
905 }
906
907 mutex_unlock(lock: &bus->device_lock);
908 return err;
909}
910EXPORT_SYMBOL_GPL(mei_cldev_disable);
911
912/**
913 * mei_cldev_send_gsc_command - sends a gsc command, by sending
914 * a gsl mei message to gsc and receiving reply from gsc
915 *
916 * @cldev: me client device
917 * @client_id: client id to send the command to
918 * @fence_id: fence id to send the command to
919 * @sg_in: scatter gather list containing addresses for rx message buffer
920 * @total_in_len: total length of data in 'in' sg, can be less than the sum of buffers sizes
921 * @sg_out: scatter gather list containing addresses for tx message buffer
922 *
923 * Return:
924 * * written size in bytes
925 * * < 0 on error
926 */
927ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
928 u8 client_id, u32 fence_id,
929 struct scatterlist *sg_in,
930 size_t total_in_len,
931 struct scatterlist *sg_out)
932{
933 struct mei_cl *cl;
934 struct mei_device *bus;
935 ssize_t ret = 0;
936
937 struct mei_ext_hdr_gsc_h2f *ext_hdr;
938 size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f);
939 int sg_out_nents, sg_in_nents;
940 int i;
941 struct scatterlist *sg;
942 struct mei_ext_hdr_gsc_f2h rx_msg;
943 unsigned int sg_len;
944
945 if (!cldev || !sg_in || !sg_out)
946 return -EINVAL;
947
948 cl = cldev->cl;
949 bus = cldev->bus;
950
951 dev_dbg(&cldev->dev, "client_id %u, fence_id %u\n", client_id, fence_id);
952
953 if (!bus->hbm_f_gsc_supported)
954 return -EOPNOTSUPP;
955
956 sg_out_nents = sg_nents(sg: sg_out);
957 sg_in_nents = sg_nents(sg: sg_in);
958 /* at least one entry in tx and rx sgls must be present */
959 if (sg_out_nents <= 0 || sg_in_nents <= 0)
960 return -EINVAL;
961
962 buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl);
963 ext_hdr = kzalloc(buf_sz, GFP_KERNEL);
964 if (!ext_hdr)
965 return -ENOMEM;
966
967 /* construct the GSC message */
968 ext_hdr->hdr.type = MEI_EXT_HDR_GSC;
969 ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */
970
971 ext_hdr->client_id = client_id;
972 ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL;
973 ext_hdr->fence_id = fence_id;
974 ext_hdr->input_address_count = sg_in_nents;
975 ext_hdr->output_address_count = sg_out_nents;
976 ext_hdr->reserved[0] = 0;
977 ext_hdr->reserved[1] = 0;
978
979 /* copy in-sgl to the message */
980 for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) {
981 ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
982 ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
983 sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
984 ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len;
985 total_in_len -= ext_hdr->sgl[i].length;
986 }
987
988 /* copy out-sgl to the message */
989 for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) {
990 ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
991 ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
992 sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
993 ext_hdr->sgl[i].length = sg_len;
994 }
995
996 /* send the message to GSC */
997 ret = __mei_cl_send(cl, buf: (u8 *)ext_hdr, length: buf_sz, vtag: 0, mode: MEI_CL_IO_SGL);
998 if (ret < 0) {
999 dev_err(&cldev->dev, "__mei_cl_send failed, returned %zd\n", ret);
1000 goto end;
1001 }
1002 if (ret != buf_sz) {
1003 dev_err(&cldev->dev, "__mei_cl_send returned %zd instead of expected %zd\n",
1004 ret, buf_sz);
1005 ret = -EIO;
1006 goto end;
1007 }
1008
1009 /* receive the reply from GSC, note that at this point sg_in should contain the reply */
1010 ret = __mei_cl_recv(cl, buf: (u8 *)&rx_msg, length: sizeof(rx_msg), NULL, mode: MEI_CL_IO_SGL, timeout: 0);
1011
1012 if (ret != sizeof(rx_msg)) {
1013 dev_err(&cldev->dev, "__mei_cl_recv returned %zd instead of expected %zd\n",
1014 ret, sizeof(rx_msg));
1015 if (ret >= 0)
1016 ret = -EIO;
1017 goto end;
1018 }
1019
1020 /* check rx_msg.client_id and rx_msg.fence_id match the ones we send */
1021 if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) {
1022 dev_err(&cldev->dev, "received client_id/fence_id %u/%u instead of %u/%u sent\n",
1023 rx_msg.client_id, rx_msg.fence_id, client_id, fence_id);
1024 ret = -EFAULT;
1025 goto end;
1026 }
1027
1028 dev_dbg(&cldev->dev, "gsc command: successfully written %u bytes\n", rx_msg.written);
1029 ret = rx_msg.written;
1030
1031end:
1032 kfree(objp: ext_hdr);
1033 return ret;
1034}
1035EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command);
1036
1037/**
1038 * mei_cl_device_find - find matching entry in the driver id table
1039 *
1040 * @cldev: me client device
1041 * @cldrv: me client driver
1042 *
1043 * Return: id on success; NULL if no id is matching
1044 */
1045static const
1046struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
1047 const struct mei_cl_driver *cldrv)
1048{
1049 const struct mei_cl_device_id *id;
1050 const uuid_le *uuid;
1051 u8 version;
1052 bool match;
1053
1054 uuid = mei_me_cl_uuid(me_cl: cldev->me_cl);
1055 version = mei_me_cl_ver(me_cl: cldev->me_cl);
1056
1057 id = cldrv->id_table;
1058 while (uuid_le_cmp(NULL_UUID_LE, u2: id->uuid)) {
1059 if (!uuid_le_cmp(u1: *uuid, u2: id->uuid)) {
1060 match = true;
1061
1062 if (cldev->name[0])
1063 if (strncmp(cldev->name, id->name,
1064 sizeof(id->name)))
1065 match = false;
1066
1067 if (id->version != MEI_CL_VERSION_ANY)
1068 if (id->version != version)
1069 match = false;
1070 if (match)
1071 return id;
1072 }
1073
1074 id++;
1075 }
1076
1077 return NULL;
1078}
1079
1080/**
1081 * mei_cl_device_match - device match function
1082 *
1083 * @dev: device
1084 * @drv: driver
1085 *
1086 * Return: 1 if matching device was found 0 otherwise
1087 */
1088static int mei_cl_device_match(struct device *dev, const struct device_driver *drv)
1089{
1090 const struct mei_cl_device *cldev = to_mei_cl_device(dev);
1091 const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
1092 const struct mei_cl_device_id *found_id;
1093
1094 if (!cldev->do_match)
1095 return 0;
1096
1097 if (!cldrv || !cldrv->id_table)
1098 return 0;
1099
1100 found_id = mei_cl_device_find(cldev, cldrv);
1101 if (found_id)
1102 return 1;
1103
1104 return 0;
1105}
1106
1107/**
1108 * mei_cl_device_probe - bus probe function
1109 *
1110 * @dev: device
1111 *
1112 * Return: 0 on success; < 0 otherwise
1113 */
1114static int mei_cl_device_probe(struct device *dev)
1115{
1116 struct mei_cl_device *cldev;
1117 struct mei_cl_driver *cldrv;
1118 const struct mei_cl_device_id *id;
1119 int ret;
1120
1121 cldev = to_mei_cl_device(dev);
1122 cldrv = to_mei_cl_driver(dev->driver);
1123
1124 if (!cldrv || !cldrv->probe)
1125 return -ENODEV;
1126
1127 id = mei_cl_device_find(cldev, cldrv);
1128 if (!id)
1129 return -ENODEV;
1130
1131 if (!mei_cl_bus_module_get(cldev)) {
1132 dev_err(&cldev->dev, "get hw module failed");
1133 return -ENODEV;
1134 }
1135
1136 ret = cldrv->probe(cldev, id);
1137 if (ret) {
1138 mei_cl_bus_module_put(cldev);
1139 return ret;
1140 }
1141
1142 __module_get(THIS_MODULE);
1143 return 0;
1144}
1145
1146/**
1147 * mei_cl_device_remove - remove device from the bus
1148 *
1149 * @dev: device
1150 *
1151 * Return: 0 on success; < 0 otherwise
1152 */
1153static void mei_cl_device_remove(struct device *dev)
1154{
1155 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1156 struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
1157
1158 if (cldrv->remove)
1159 cldrv->remove(cldev);
1160
1161 mei_cldev_unregister_callbacks(cldev);
1162
1163 mei_cl_bus_module_put(cldev);
1164 module_put(THIS_MODULE);
1165}
1166
1167static ssize_t name_show(struct device *dev, struct device_attribute *a,
1168 char *buf)
1169{
1170 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1171
1172 return sysfs_emit(buf, fmt: "%s", cldev->name);
1173}
1174static DEVICE_ATTR_RO(name);
1175
1176static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
1177 char *buf)
1178{
1179 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1180 const uuid_le *uuid = mei_me_cl_uuid(me_cl: cldev->me_cl);
1181
1182 return sysfs_emit(buf, fmt: "%pUl", uuid);
1183}
1184static DEVICE_ATTR_RO(uuid);
1185
1186static ssize_t version_show(struct device *dev, struct device_attribute *a,
1187 char *buf)
1188{
1189 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1190 u8 version = mei_me_cl_ver(me_cl: cldev->me_cl);
1191
1192 return sysfs_emit(buf, fmt: "%02X", version);
1193}
1194static DEVICE_ATTR_RO(version);
1195
1196static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
1197 char *buf)
1198{
1199 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1200 const uuid_le *uuid = mei_me_cl_uuid(me_cl: cldev->me_cl);
1201 u8 version = mei_me_cl_ver(me_cl: cldev->me_cl);
1202
1203 return sysfs_emit(buf, fmt: "mei:%s:%pUl:%02X:", cldev->name, uuid, version);
1204}
1205static DEVICE_ATTR_RO(modalias);
1206
1207static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
1208 char *buf)
1209{
1210 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1211 u8 maxconn = mei_me_cl_max_conn(me_cl: cldev->me_cl);
1212
1213 return sysfs_emit(buf, fmt: "%d", maxconn);
1214}
1215static DEVICE_ATTR_RO(max_conn);
1216
1217static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
1218 char *buf)
1219{
1220 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1221 u8 fixed = mei_me_cl_fixed(me_cl: cldev->me_cl);
1222
1223 return sysfs_emit(buf, fmt: "%d", fixed);
1224}
1225static DEVICE_ATTR_RO(fixed);
1226
1227static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
1228 char *buf)
1229{
1230 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1231 bool vt = mei_me_cl_vt(me_cl: cldev->me_cl);
1232
1233 return sysfs_emit(buf, fmt: "%d", vt);
1234}
1235static DEVICE_ATTR_RO(vtag);
1236
1237static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
1238 char *buf)
1239{
1240 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1241 u32 maxlen = mei_me_cl_max_len(me_cl: cldev->me_cl);
1242
1243 return sysfs_emit(buf, fmt: "%u", maxlen);
1244}
1245static DEVICE_ATTR_RO(max_len);
1246
/* default sysfs attributes published for every mei client device */
static struct attribute *mei_cldev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_uuid.attr,
	&dev_attr_version.attr,
	&dev_attr_modalias.attr,
	&dev_attr_max_conn.attr,
	&dev_attr_fixed.attr,
	&dev_attr_vtag.attr,
	&dev_attr_max_len.attr,
	NULL,
};
/* generates mei_cldev_groups, wired into mei_cl_bus_type.dev_groups below */
ATTRIBUTE_GROUPS(mei_cldev);
1259
1260/**
1261 * mei_cl_device_uevent - me client bus uevent handler
1262 *
1263 * @dev: device
1264 * @env: uevent kobject
1265 *
1266 * Return: 0 on success -ENOMEM on when add_uevent_var fails
1267 */
1268static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
1269{
1270 const struct mei_cl_device *cldev = to_mei_cl_device(dev);
1271 const uuid_le *uuid = mei_me_cl_uuid(me_cl: cldev->me_cl);
1272 u8 version = mei_me_cl_ver(me_cl: cldev->me_cl);
1273
1274 if (add_uevent_var(env, format: "MEI_CL_VERSION=%d", version))
1275 return -ENOMEM;
1276
1277 if (add_uevent_var(env, format: "MEI_CL_UUID=%pUl", uuid))
1278 return -ENOMEM;
1279
1280 if (add_uevent_var(env, format: "MEI_CL_NAME=%s", cldev->name))
1281 return -ENOMEM;
1282
1283 if (add_uevent_var(env, format: "MODALIAS=mei:%s:%pUl:%02X:",
1284 cldev->name, uuid, version))
1285 return -ENOMEM;
1286
1287 return 0;
1288}
1289
/* the "mei" bus: matching, probing and uevent generation for client devices */
static const struct bus_type mei_cl_bus_type = {
	.name = "mei",
	.dev_groups = mei_cldev_groups,
	.match = mei_cl_device_match,
	.probe = mei_cl_device_probe,
	.remove = mei_cl_device_remove,
	.uevent = mei_cl_device_uevent,
};
1298
1299static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
1300{
1301 if (bus) {
1302 get_device(dev: &bus->dev);
1303 get_device(dev: bus->parent);
1304 }
1305
1306 return bus;
1307}
1308
1309static void mei_dev_bus_put(struct mei_device *bus)
1310{
1311 if (bus) {
1312 put_device(dev: bus->parent);
1313 put_device(dev: &bus->dev);
1314 }
1315}
1316
1317static void mei_cl_bus_dev_release(struct device *dev)
1318{
1319 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1320 struct mei_device *mdev = cldev->cl->dev;
1321 struct mei_cl *cl;
1322
1323 mei_cl_flush_queues(cl: cldev->cl, NULL);
1324 mei_me_cl_put(me_cl: cldev->me_cl);
1325 mei_dev_bus_put(bus: cldev->bus);
1326
1327 list_for_each_entry(cl, &mdev->file_list, link)
1328 WARN_ON(cl == cldev->cl);
1329
1330 kfree(objp: cldev->cl);
1331 kfree(objp: cldev);
1332}
1333
/* device type for mei client devices; release frees the whole object */
static const struct device_type mei_cl_device_type = {
	.release = mei_cl_bus_dev_release,
};
1337
1338/**
1339 * mei_cl_bus_set_name - set device name for me client device
1340 * <controller>-<client device>
1341 * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
1342 *
1343 * @cldev: me client device
1344 */
1345static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
1346{
1347 dev_set_name(dev: &cldev->dev, name: "%s-%pUl",
1348 dev_name(dev: cldev->bus->parent),
1349 mei_me_cl_uuid(me_cl: cldev->me_cl));
1350}
1351
1352/**
1353 * mei_cl_bus_dev_alloc - initialize and allocate mei client device
1354 *
1355 * @bus: mei device
1356 * @me_cl: me client
1357 *
1358 * Return: allocated device structure or NULL on allocation failure
1359 */
1360static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
1361 struct mei_me_client *me_cl)
1362{
1363 struct mei_cl_device *cldev;
1364 struct mei_cl *cl;
1365
1366 cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
1367 if (!cldev)
1368 return NULL;
1369
1370 cl = mei_cl_allocate(dev: bus);
1371 if (!cl) {
1372 kfree(objp: cldev);
1373 return NULL;
1374 }
1375
1376 device_initialize(dev: &cldev->dev);
1377 cldev->dev.parent = bus->parent;
1378 cldev->dev.bus = &mei_cl_bus_type;
1379 cldev->dev.type = &mei_cl_device_type;
1380 cldev->bus = mei_dev_bus_get(bus);
1381 cldev->me_cl = mei_me_cl_get(me_cl);
1382 cldev->cl = cl;
1383 mei_cl_bus_set_name(cldev);
1384 cldev->is_added = 0;
1385 INIT_LIST_HEAD(list: &cldev->bus_list);
1386 device_enable_async_suspend(dev: &cldev->dev);
1387
1388 return cldev;
1389}
1390
1391/**
1392 * mei_cl_bus_dev_setup - setup me client device
1393 * run fix up routines and set the device name
1394 *
1395 * @bus: mei device
1396 * @cldev: me client device
1397 *
1398 * Return: true if the device is eligible for enumeration
1399 */
1400static bool mei_cl_bus_dev_setup(struct mei_device *bus,
1401 struct mei_cl_device *cldev)
1402{
1403 cldev->do_match = 1;
1404 mei_cl_bus_dev_fixup(dev: cldev);
1405
1406 /* the device name can change during fix up */
1407 if (cldev->do_match)
1408 mei_cl_bus_set_name(cldev);
1409
1410 return cldev->do_match == 1;
1411}
1412
1413/**
1414 * mei_cl_bus_dev_add - add me client devices
1415 *
1416 * @cldev: me client device
1417 *
1418 * Return: 0 on success; < 0 on failure
1419 */
1420static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
1421{
1422 int ret;
1423
1424 dev_dbg(&cldev->dev, "adding %pUL:%02X\n",
1425 mei_me_cl_uuid(cldev->me_cl),
1426 mei_me_cl_ver(cldev->me_cl));
1427 ret = device_add(dev: &cldev->dev);
1428 if (!ret)
1429 cldev->is_added = 1;
1430
1431 return ret;
1432}
1433
1434/**
1435 * mei_cl_bus_dev_stop - stop the driver
1436 *
1437 * @cldev: me client device
1438 */
1439static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
1440{
1441 cldev->do_match = 0;
1442 if (cldev->is_added)
1443 device_release_driver(dev: &cldev->dev);
1444}
1445
1446/**
1447 * mei_cl_bus_dev_destroy - destroy me client devices object
1448 *
1449 * @cldev: me client device
1450 *
1451 * Locking: called under "dev->cl_bus_lock" lock
1452 */
1453static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
1454{
1455
1456 WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));
1457
1458 if (!cldev->is_added)
1459 return;
1460
1461 device_del(dev: &cldev->dev);
1462
1463 list_del_init(entry: &cldev->bus_list);
1464
1465 cldev->is_added = 0;
1466 put_device(dev: &cldev->dev);
1467}
1468
1469/**
1470 * mei_cl_bus_remove_device - remove a devices form the bus
1471 *
1472 * @cldev: me client device
1473 */
1474static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
1475{
1476 mei_cl_bus_dev_stop(cldev);
1477 mei_cl_bus_dev_destroy(cldev);
1478}
1479
1480/**
1481 * mei_cl_bus_remove_devices - remove all devices form the bus
1482 *
1483 * @bus: mei device
1484 */
1485void mei_cl_bus_remove_devices(struct mei_device *bus)
1486{
1487 struct mei_cl_device *cldev, *next;
1488
1489 mutex_lock(lock: &bus->cl_bus_lock);
1490 list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
1491 mei_cl_bus_remove_device(cldev);
1492 mutex_unlock(lock: &bus->cl_bus_lock);
1493}
1494
1495
1496/**
1497 * mei_cl_bus_dev_init - allocate and initializes an mei client devices
1498 * based on me client
1499 *
1500 * @bus: mei device
1501 * @me_cl: me client
1502 *
1503 * Locking: called under "dev->cl_bus_lock" lock
1504 */
1505static void mei_cl_bus_dev_init(struct mei_device *bus,
1506 struct mei_me_client *me_cl)
1507{
1508 struct mei_cl_device *cldev;
1509
1510 WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
1511
1512 dev_dbg(&bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
1513
1514 if (me_cl->bus_added)
1515 return;
1516
1517 cldev = mei_cl_bus_dev_alloc(bus, me_cl);
1518 if (!cldev)
1519 return;
1520
1521 me_cl->bus_added = true;
1522 list_add_tail(new: &cldev->bus_list, head: &bus->device_list);
1523
1524}
1525
1526/**
1527 * mei_cl_bus_rescan - scan me clients list and add create
1528 * devices for eligible clients
1529 *
1530 * @bus: mei device
1531 */
1532static void mei_cl_bus_rescan(struct mei_device *bus)
1533{
1534 struct mei_cl_device *cldev, *n;
1535 struct mei_me_client *me_cl;
1536
1537 mutex_lock(lock: &bus->cl_bus_lock);
1538
1539 down_read(sem: &bus->me_clients_rwsem);
1540 list_for_each_entry(me_cl, &bus->me_clients, list)
1541 mei_cl_bus_dev_init(bus, me_cl);
1542 up_read(sem: &bus->me_clients_rwsem);
1543
1544 list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
1545
1546 if (!mei_me_cl_is_active(me_cl: cldev->me_cl)) {
1547 mei_cl_bus_remove_device(cldev);
1548 continue;
1549 }
1550
1551 if (cldev->is_added)
1552 continue;
1553
1554 if (mei_cl_bus_dev_setup(bus, cldev))
1555 mei_cl_bus_dev_add(cldev);
1556 else {
1557 list_del_init(entry: &cldev->bus_list);
1558 put_device(dev: &cldev->dev);
1559 }
1560 }
1561 mutex_unlock(lock: &bus->cl_bus_lock);
1562
1563 dev_dbg(&bus->dev, "rescan end");
1564}
1565
/**
 * mei_cl_bus_rescan_work - workqueue entry point for bus rescan
 *
 * @work: embedded work item (bus_rescan_work of struct mei_device)
 */
void mei_cl_bus_rescan_work(struct work_struct *work)
{
	struct mei_device *bus =
		container_of(work, struct mei_device, bus_rescan_work);

	mei_cl_bus_rescan(bus);
}
1573
1574int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
1575 struct module *owner)
1576{
1577 int err;
1578
1579 cldrv->driver.name = cldrv->name;
1580 cldrv->driver.owner = owner;
1581 cldrv->driver.bus = &mei_cl_bus_type;
1582
1583 err = driver_register(drv: &cldrv->driver);
1584 if (err)
1585 return err;
1586
1587 pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
1588
1589 return 0;
1590}
1591EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
1592
1593void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
1594{
1595 driver_unregister(drv: &cldrv->driver);
1596
1597 pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
1598}
1599EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
1600
1601
1602int __init mei_cl_bus_init(void)
1603{
1604 return bus_register(bus: &mei_cl_bus_type);
1605}
1606
1607void __exit mei_cl_bus_exit(void)
1608{
1609 bus_unregister(bus: &mei_cl_bus_type);
1610}
1611