/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the drm_gem_object_funcs.export
 * and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for
 * drivers are all individually exported for drivers which need to overwrite
 * or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On the export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called. For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * are the lookup caches for import and export. These are required to guarantee
 * that any given object will always have only one unique userspace handle. This
 * is required to allow userspace to detect duplicated imports, since some GEM
 * drivers do fail command submissions if a given buffer object is listed more
 * than once. These import and export caches in &drm_prime_file_private only
 * retain a weak reference, which is cleaned up when the corresponding object is
 * released.
 *
 * Self-importing: If userspace is using PRIME as a replacement for flink then
 * it will get a fd->handle request for a GEM object that it created. Drivers
 * should detect this situation and return back the underlying object from the
 * dma-buf private. For GEM based drivers this is handled in
 * drm_gem_prime_import() already.
 */
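
/*
 * A minimal sketch (placeholder "foo" names, not taken from any real driver)
 * of how a GEM driver typically wires up the hooks described above; imported
 * objects must release their attachment with drm_prime_gem_destroy() from the
 * free hook:
 *
 *     static const struct drm_gem_object_funcs foo_gem_funcs = {
 *             .free = foo_gem_free,
 *             .export = drm_gem_prime_export,
 *             .get_sg_table = foo_gem_get_sg_table,
 *             .vmap = foo_gem_vmap,
 *             .vunmap = foo_gem_vunmap,
 *     };
 *
 *     static const struct drm_driver foo_driver = {
 *             .gem_prime_import = drm_gem_prime_import,
 *             .gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *             ...
 *     };
 */
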
87
88struct drm_prime_member {
89 struct dma_buf *dma_buf;
90 uint32_t handle;
91
92 struct rb_node dmabuf_rb;
93 struct rb_node handle_rb;
94};
95
96int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
97 struct dma_buf *dma_buf, uint32_t handle)
98{
99 struct drm_prime_member *member;
100 struct rb_node **p, *rb;
101
102 member = kmalloc(sizeof(*member), GFP_KERNEL);
103 if (!member)
104 return -ENOMEM;
105
106 get_dma_buf(dmabuf: dma_buf);
107 member->dma_buf = dma_buf;
108 member->handle = handle;
109
110 rb = NULL;
111 p = &prime_fpriv->dmabufs.rb_node;
112 while (*p) {
113 struct drm_prime_member *pos;
114
115 rb = *p;
116 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
117 if (dma_buf > pos->dma_buf)
118 p = &rb->rb_right;
119 else
120 p = &rb->rb_left;
121 }
122 rb_link_node(node: &member->dmabuf_rb, parent: rb, rb_link: p);
123 rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
124
125 rb = NULL;
126 p = &prime_fpriv->handles.rb_node;
127 while (*p) {
128 struct drm_prime_member *pos;
129
130 rb = *p;
131 pos = rb_entry(rb, struct drm_prime_member, handle_rb);
132 if (handle > pos->handle)
133 p = &rb->rb_right;
134 else
135 p = &rb->rb_left;
136 }
137 rb_link_node(node: &member->handle_rb, parent: rb, rb_link: p);
138 rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
139
140 return 0;
141}
142
143static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
144 uint32_t handle)
145{
146 struct rb_node *rb;
147
148 rb = prime_fpriv->handles.rb_node;
149 while (rb) {
150 struct drm_prime_member *member;
151
152 member = rb_entry(rb, struct drm_prime_member, handle_rb);
153 if (member->handle == handle)
154 return member->dma_buf;
155 else if (member->handle < handle)
156 rb = rb->rb_right;
157 else
158 rb = rb->rb_left;
159 }
160
161 return NULL;
162}
163
164static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
165 struct dma_buf *dma_buf,
166 uint32_t *handle)
167{
168 struct rb_node *rb;
169
170 rb = prime_fpriv->dmabufs.rb_node;
171 while (rb) {
172 struct drm_prime_member *member;
173
174 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
175 if (member->dma_buf == dma_buf) {
176 *handle = member->handle;
177 return 0;
178 } else if (member->dma_buf < dma_buf) {
179 rb = rb->rb_right;
180 } else {
181 rb = rb->rb_left;
182 }
183 }
184
185 return -ENOENT;
186}
187
188void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
189 uint32_t handle)
190{
191 struct rb_node *rb;
192
193 rb = prime_fpriv->handles.rb_node;
194 while (rb) {
195 struct drm_prime_member *member;
196
197 member = rb_entry(rb, struct drm_prime_member, handle_rb);
198 if (member->handle == handle) {
199 rb_erase(&member->handle_rb, &prime_fpriv->handles);
200 rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
201
202 dma_buf_put(dmabuf: member->dma_buf);
203 kfree(objp: member);
204 break;
205 } else if (member->handle < handle) {
206 rb = rb->rb_right;
207 } else {
208 rb = rb->rb_left;
209 }
210 }
211}
212
213void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
214{
215 mutex_init(&prime_fpriv->lock);
216 prime_fpriv->dmabufs = RB_ROOT;
217 prime_fpriv->handles = RB_ROOT;
218}
219
220void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
221{
222 /* by now drm_gem_release should've made sure the list is empty */
223 WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
224}
225
/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference on the export fd holds */
	drm_gem_object_put(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm_device to import into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
					  dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (dev->driver->prime_fd_to_handle) {
		return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd,
						       &args->handle);
	}

	return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_dmabuf - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_gem_object_funcs.export callback.
 *
 * Unlike drm_gem_prime_handle_to_fd(), it returns the struct dma_buf it
 * has created, without attaching it to any file descriptors. The difference
 * between those two is similar to that between anon_inode_getfile() and
 * anon_inode_getfd(); insertion into the descriptor table is something you
 * cannot revert if any cleanup is needed, so the descriptor-returning
 * variants should only be used when you are past the last failure exit
 * and the only thing left is passing the new file descriptor to userland.
 * When all you need is the object itself or when you need to do something
 * else that might fail, use this one instead.
 */
struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
					       struct drm_file *file_priv, uint32_t handle,
					       uint32_t flags)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		dmabuf = ERR_PTR(-ENOENT);
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret) {
		dma_buf_put(dmabuf);
		dmabuf = ERR_PTR(ret);
	}
out:
	drm_gem_object_put(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);
	return dmabuf;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_dmabuf);

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_gem_object_funcs.export callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct dma_buf *dmabuf;
	int fd = get_unused_fd_flags(flags);

	if (fd < 0)
		return fd;

	dmabuf = drm_gem_prime_handle_to_dmabuf(dev, file_priv, handle, flags);
	if (IS_ERR(dmabuf)) {
		put_unused_fd(fd);
		return PTR_ERR(dmabuf);
	}

	fd_install(fd, dmabuf->file);
	*prime_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	if (dev->driver->prime_handle_to_fd) {
		return dev->driver->prime_handle_to_fd(dev, file_priv,
						       args->handle, args->flags,
						       &args->fd);
	}
	return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle,
					  args->flags, &args->fd);
}

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
 * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is
 * unimplemented, exports into another device are rejected.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to
 * pin it indefinitely.
 *
 * FIXME: The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage. That is fine for scanout, but it is not the best
 * option for sharing lots of buffers for rendering.
 */
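
/*
 * A minimal sketch (placeholder "foo" names, not from a real driver) of how a
 * driver can reuse the individual helpers above while overriding a single
 * &dma_buf_ops hook in its own export path; here foo_dmabuf_mmap stands in
 * for a driver-specific mmap implementation:
 *
 *     static const struct dma_buf_ops foo_dmabuf_ops = {
 *             .attach = drm_gem_map_attach,
 *             .detach = drm_gem_map_detach,
 *             .map_dma_buf = drm_gem_map_dma_buf,
 *             .unmap_dma_buf = drm_gem_unmap_dma_buf,
 *             .release = drm_gem_dmabuf_release,
 *             .mmap = foo_dmabuf_mmap,
 *             .vmap = drm_gem_dmabuf_vmap,
 *             .vunmap = drm_gem_dmabuf_vunmap,
 *     };
 */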

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	int ret;

	/*
	 * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
	 * that implement their own ->map_dma_buf() do not.
	 */
	if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
	    !obj->funcs->get_sg_table)
		return -ENOSYS;

	if (!obj->funcs->pin)
		return 0;

	ret = dma_resv_lock(obj->resv, NULL);
	if (ret)
		return ret;
	ret = obj->funcs->pin(obj);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	int ret;

	if (!obj->funcs->unpin)
		return;

	ret = dma_resv_lock(obj->resv, NULL);
	if (drm_WARN_ON(obj->dev, ret))
		return;
	obj->funcs->unpin(obj);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!obj->funcs->get_sg_table))
		return ERR_PTR(-ENOSYS);

	sgt = obj->funcs->get_sg_table(obj);
	if (IS_ERR(sgt))
		return sgt;

	ret = dma_map_sgtable(attach->dev, sgt, dir,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(ret);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @map: the virtual address of the buffer
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 * The kernel virtual address is returned in map.
 *
 * Returns 0 on success or a negative errno code otherwise.
 */
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_vmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @map: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		vma->vm_ops = obj->funcs->vm_ops;

		drm_gem_object_get(obj);
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		vma->vm_private_data = obj;
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @dev: DRM device
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma-buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
				       struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg;
	size_t max_segment = 0;
	int err;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	if (dev)
		max_segment = dma_max_mapping_size(dev->dev);
	if (max_segment == 0)
		max_segment = UINT_MAX;
	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
						(unsigned long)nr_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (err) {
		kfree(sg);
		sg = ERR_PTR(err);
	}
	return sg;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
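
/*
 * A minimal sketch of a &drm_gem_object_funcs.get_sg_table hook built on
 * drm_prime_pages_to_sg(); the "foo" object type and its pages/num_pages
 * fields are hypothetical, not part of this file:
 *
 *     static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
 *     {
 *             struct foo_gem_object *foo = to_foo_gem_object(obj);
 *
 *             return drm_prime_pages_to_sg(obj->dev, foo->pages, foo->num_pages);
 *     }
 */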

/**
 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 * @sgt: sg_table describing the buffer to check
 *
 * This helper calculates the contiguous size in the DMA address space
 * of the buffer described by the provided sg_table.
 *
 * This is useful for implementing
 * &drm_driver.gem_prime_import_sg_table.
 */
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	struct scatterlist *sg;
	unsigned long size = 0;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		unsigned int len = sg_dma_len(sg);

		if (!len)
			break;
		if (sg_dma_address(sg) != expected)
			break;
		expected += len;
		size += len;
	}
	return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export function for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

/**
 * drm_gem_is_prime_exported_dma_buf - check if a dma-buf was exported from a
 * GEM object belonging to @dev
 * @dev: drm_device to check against
 * @dma_buf: dma-buf object to import
 *
 * Return: true if the DMA-BUF was exported from a GEM object belonging
 * to @dev, false otherwise.
 */
bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
				       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return (dma_buf->ops == &drm_gem_prime_dmabuf_ops) && (obj->dev == dev);
}
EXPORT_SYMBOL(drm_gem_is_prime_exported_dma_buf);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers that want to use a different device structure than &drm_device.dev
 * for attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		/*
		 * Importing a dmabuf exported from our own gem increases
		 * the refcount on the gem itself instead of the f_count of
		 * the dmabuf.
		 */
		obj = dma_buf->priv;
		drm_gem_object_get(obj);
		return obj;
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import function for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev));
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_prime_sg_to_page_array - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the pages in
 * @max_entries: size of the passed-in array
 *
 * Exports an sg table into an array of pages.
 *
 * This function is deprecated and its use is strongly discouraged.
 * The page array is only useful for page faults and those can corrupt fields
 * in the struct page if they are not handled by the exporting driver.
 */
int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
					    struct page **pages,
					    int max_entries)
{
	struct sg_page_iter page_iter;
	struct page **p = pages;

	for_each_sgtable_page(sgt, &page_iter, 0) {
		if (WARN_ON(p - pages >= max_entries))
			return -1;
		*p++ = sg_page_iter_page(&page_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_array);

/**
 * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
 * @sgt: scatter-gather table to convert
 * @addrs: array to store the dma bus address of each page
 * @max_entries: size of the passed-in array
 *
 * Exports an sg table into an array of addresses.
 *
 * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 */
int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
				   int max_entries)
{
	struct sg_dma_page_iter dma_iter;
	dma_addr_t *a = addrs;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		if (WARN_ON(a - addrs >= max_entries))
			return -1;
		*a++ = sg_page_iter_dma_address(&dma_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
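
/*
 * A minimal sketch of a &drm_driver.gem_prime_import_sg_table hook that
 * flattens the imported scatterlist into a DMA address array with
 * drm_prime_sg_to_dma_addr_array(); "foo_gem_create" and the "foo" object
 * layout are hypothetical, and error unwinding of the partially created
 * object is omitted for brevity:
 *
 *     static struct drm_gem_object *
 *     foo_gem_prime_import_sg_table(struct drm_device *dev,
 *                                   struct dma_buf_attachment *attach,
 *                                   struct sg_table *sgt)
 *     {
 *             size_t size = attach->dmabuf->size;
 *             struct foo_gem_object *foo = foo_gem_create(dev, size);
 *
 *             if (IS_ERR(foo))
 *                     return ERR_CAST(foo);
 *
 *             if (drm_prime_sg_to_dma_addr_array(sgt, foo->dma_addrs,
 *                                                size >> PAGE_SHIFT))
 *                     return ERR_PTR(-EINVAL);
 *
 *             return &foo->base;
 *     }
 */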

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
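
/*
 * A minimal sketch of a &drm_gem_object_funcs.free hook for a driver that
 * imports buffers with drm_gem_prime_import(); the "foo" object type and its
 * cached "sgt" pointer are hypothetical:
 *
 *     static void foo_gem_free(struct drm_gem_object *obj)
 *     {
 *             struct foo_gem_object *foo = to_foo_gem_object(obj);
 *
 *             if (obj->import_attach)
 *                     drm_prime_gem_destroy(obj, foo->sgt);
 *
 *             drm_gem_object_release(obj);
 *             kfree(foo);
 *     }
 */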