/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creating and
 * refining this idea.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/iosys-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @pin:
	 *
	 * This is called by dma_buf_pin() and lets the exporter know that the
	 * DMA-buf can't be moved any more. Ideally, the exporter should
	 * pin the buffer so that it is generally accessible by all
	 * devices.
	 *
	 * This is called with the &dma_buf.resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This is called automatically for non-dynamic importers from
	 * dma_buf_attach().
	 *
	 * Note that similar to non-dynamic exporters in their @map_dma_buf
	 * callback the driver must guarantee that the memory is available for
	 * use and cleared of any old data by the time this function returns.
	 * Drivers which pipeline their buffer moves internally must wait for
	 * all moves and clears to complete.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure.
	 */
	int (*pin)(struct dma_buf_attachment *attach);

	/**
	 * @unpin:
	 *
	 * This is called by dma_buf_unpin() and lets the exporter know that the
	 * DMA-buf can be moved again.
	 *
	 * This is called with the dmabuf->resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct dma_buf_attachment *attach);

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_params from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * This is always called with the dmabuf->resv object locked when
	 * the dynamic_mapping flag is true.
	 *
	 * Note that for non-dynamic exporters the driver must guarantee that
	 * the memory is available for use and cleared of any old data by
	 * the time this function returns.  Drivers which pipeline their buffer
	 * moves internally must wait for all moves and clears to complete.
	 * Dynamic exporters do not need to follow this rule: for non-dynamic
	 * importers the buffer is already pinned through @pin, which has the
	 * same requirements. Dynamic importers, on the other hand, are
	 * required to obey the dma_resv fences.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment. The addresses and lengths in
	 * the scatter list are PAGE_SIZE aligned.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 *
	 * Note that exporters should not try to cache the scatter list, or
	 * return the same one for multiple calls. Caching is done either by the
	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
	 * of the scatter list is transferred to the caller, and returned by
	 * @unmap_dma_buf.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * For static dma_buf handling this might also unpin the backing
	 * storage if this is the last mapping of the DMA buffer.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);
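
	/*
	 * For illustration only, a minimal sketch of a @map_dma_buf and
	 * matching @unmap_dma_buf pair for simple page-backed storage. The
	 * "struct my_buffer" type and its pages/nr_pages/size fields are
	 * hypothetical and not part of this header:
	 *
	 *	static struct sg_table *
	 *	my_map_dma_buf(struct dma_buf_attachment *attach,
	 *		       enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = attach->dmabuf->priv;
	 *		struct sg_table *sgt;
	 *		int ret;
	 *
	 *		sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	 *		if (!sgt)
	 *			return ERR_PTR(-ENOMEM);
	 *
	 *		ret = sg_alloc_table_from_pages(sgt, buf->pages,
	 *						buf->nr_pages, 0,
	 *						buf->size, GFP_KERNEL);
	 *		if (ret)
	 *			goto err_free;
	 *
	 *		ret = dma_map_sgtable(attach->dev, sgt, dir, 0);
	 *		if (ret)
	 *			goto err_free_table;
	 *		return sgt;
	 *
	 *	err_free_table:
	 *		sg_free_table(sgt);
	 *	err_free:
	 *		kfree(sgt);
	 *		return ERR_PTR(ret);
	 *	}
	 *
	 *	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
	 *				     struct sg_table *sgt,
	 *				     enum dma_data_direction dir)
	 *	{
	 *		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
	 *		sg_free_table(sgt);
	 *		kfree(sgt);
	 *	}
	 */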

	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf; this
	 * callback is mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually coherent for cpu
	 * access. The exporter also needs to ensure that cpu access is coherent
	 * for the access direction. The direction can be used by the exporter
	 * to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
	 * command for userspace mappings established through @mmap, and also
	 * for kernel mappings established with @vmap.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the buffer with the CPU. The exporter can use this
	 * to flush caches and undo anything else done in @begin_cpu_access.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
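
	/*
	 * For illustration, the importer-side bracketing that ends up in
	 * these two callbacks; "dmabuf" is assumed to be a valid
	 * &struct dma_buf and error handling is abbreviated:
	 *
	 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	 *	if (ret)
	 *		return ret;
	 *
	 *	(read the buffer contents through a vmap or mmap here)
	 *
	 *	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	 */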

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent; userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now the linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to set up their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
	void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
};

/**
 * struct dma_buf - shared buffer object
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created
 * by calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	/**
	 * @size:
	 *
	 * Size of the buffer; invariant over the lifetime of the buffer.
	 */
	size_t size;

	/**
	 * @file:
	 *
	 * File pointer used for sharing buffers across processes, and for
	 * refcounting. See dma_buf_get() and dma_buf_put().
	 */
	struct file *file;

	/**
	 * @attachments:
	 *
	 * List of dma_buf_attachment that denotes all devices attached,
	 * protected by &dma_resv lock @resv.
	 */
	struct list_head attachments;

	/** @ops: dma_buf_ops associated with this buffer object. */
	const struct dma_buf_ops *ops;

	/**
	 * @vmapping_counter:
	 *
	 * Used internally to refcnt the vmaps returned by dma_buf_vmap().
	 * Protected by @lock.
	 */
	unsigned vmapping_counter;

	/**
	 * @vmap_ptr:
	 * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
	 */
	struct iosys_map vmap_ptr;

	/**
	 * @exp_name:
	 *
	 * Name of the exporter; useful for debugging. Must not be NULL.
	 */
	const char *exp_name;

	/**
	 * @name:
	 *
	 * Userspace-provided name. Default value is NULL. If not NULL,
	 * length cannot be longer than DMA_BUF_NAME_LEN, including the
	 * terminating NUL character. Useful for accounting and debugging.
	 * Read/write accesses are protected by @name_lock.
	 *
	 * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B.
	 */
	const char *name;

	/** @name_lock: Spinlock protecting read access to @name. */
	spinlock_t name_lock;

	/**
	 * @owner:
	 *
	 * Pointer to exporter module; used for refcounting when exporter is a
	 * kernel module.
	 */
	struct module *owner;

	/** @list_node: node for dma_buf accounting and debugging. */
	struct list_head list_node;

	/** @priv: exporter specific private data for this buffer object. */
	void *priv;

	/**
	 * @resv:
	 *
	 * Reservation object linked to this dma-buf.
	 *
	 * IMPLICIT SYNCHRONIZATION RULES:
	 *
	 * Drivers which support implicit synchronization of buffer access as
	 * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
	 * below rules.
	 *
	 * - Drivers must add a read fence through dma_resv_add_fence() with the
	 *   DMA_RESV_USAGE_READ flag for anything the userspace API considers a
	 *   read access. This highly depends upon the API and window system.
	 *
	 * - Similarly drivers must add a write fence through
	 *   dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
	 *   anything the userspace API considers write access.
	 *
	 * - Drivers may just always add a write fence, since that only
	 *   causes unnecessary synchronization, but no correctness issues.
	 *
	 * - Some drivers only expose a synchronous userspace API with no
	 *   pipelining across drivers. These do not set any fences for their
	 *   access. An example here is v4l.
	 *
	 * - Drivers should use dma_resv_usage_rw() when retrieving fences as
	 *   dependencies for implicit synchronization; a sketch follows after
	 *   this struct.
	 *
	 * DYNAMIC IMPORTER RULES:
	 *
	 * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
	 * additional constraints on how they set up fences:
	 *
	 * - Dynamic importers must obey the write fences and wait for them to
	 *   signal before allowing access to the buffer's underlying storage
	 *   through the device.
	 *
	 * - Dynamic importers should set fences for any access that they can't
	 *   disable immediately from their &dma_buf_attach_ops.move_notify
	 *   callback.
	 *
	 * IMPORTANT:
	 *
	 * All drivers and memory management related functions must obey the
	 * struct dma_resv rules, specifically the rules for updating and
	 * obeying fences. See enum dma_resv_usage for further descriptions.
	 */
	struct dma_resv *resv;

	/** @poll: for userspace poll support */
	wait_queue_head_t poll;

	/** @cb_in: for userspace poll support */
	/** @cb_out: for userspace poll support */
	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		__poll_t active;
	} cb_in, cb_out;
#ifdef CONFIG_DMABUF_SYSFS_STATS
	/**
	 * @sysfs_entry:
	 *
	 * For exposing information about this buffer in sysfs. See also
	 * `DMA-BUF statistics`_ for the uapi this enables.
	 */
	struct dma_buf_sysfs_entry {
		struct kobject kobj;
		struct dma_buf *dmabuf;
	} *sysfs_entry;
#endif
};
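
/*
 * The sketch referenced above, for illustration only: snapshotting the
 * fences a driver must wait on before accessing the buffer, using
 * dma_resv_usage_rw() (declared in linux/dma-resv.h, not here). "dmabuf"
 * and "write" (whether the upcoming access writes the buffer) are assumed
 * inputs; dma_resv_get_fences() returns a kmalloc'ed array of referenced
 * fences that the caller must put and free:
 *
 *	struct dma_fence **fences;
 *	unsigned int num_fences, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences(dmabuf->resv, dma_resv_usage_rw(write),
 *				  &num_fences, &fences);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < num_fences; i++) {
 *		dma_fence_wait(fences[i], false);
 *		dma_fence_put(fences[i]);
 *	}
 *	kfree(fences);
 */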

/**
 * struct dma_buf_attach_ops - importer operations for an attachment
 *
 * Attachment operations implemented by the importer.
 */
struct dma_buf_attach_ops {
	/**
	 * @allow_peer2peer:
	 *
	 * If this is set to true the importer must be able to handle peer
	 * resources without struct pages.
	 */
	bool allow_peer2peer;

	/**
	 * @move_notify: [optional] notification that the DMA-buf is moving
	 *
	 * If this callback is provided the framework can avoid pinning the
	 * backing store while mappings exist.
	 *
	 * This callback is called with the lock of the reservation object
	 * associated with the dma_buf held and the mapping function must be
	 * called with this lock held as well. This makes sure that no mapping
	 * is created concurrently with an ongoing move operation.
	 *
	 * Mappings stay valid and are not directly affected by this callback.
	 * But the DMA-buf can now be in a different physical location, so all
	 * mappings should be destroyed and re-created as soon as possible.
	 *
	 * New mappings can be created after this callback returns, and will
	 * point to the new location of the DMA-buf.
	 */
	void (*move_notify)(struct dma_buf_attachment *attach);
};
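
/*
 * A minimal sketch, for illustration only, of a dynamic importer's
 * @move_notify that tears down its cached mapping; the "struct my_importer"
 * type and its sgt/dir fields are hypothetical. The reservation lock is
 * already held here, so the locked unmap variant is used, and the next
 * device access re-creates the mapping at the buffer's new location:
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct my_importer *imp = attach->importer_priv;
 *
 *		if (imp->sgt) {
 *			dma_buf_unmap_attachment(attach, imp->sgt, imp->dir);
 *			imp->sgt = NULL;
 *		}
 *	}
 */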

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
 * @peer2peer: true if the importer can handle peer resources without pages.
 * @priv: exporter specific attachment data.
 * @importer_ops: importer operations for this attachment. If provided,
 * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
 * @importer_priv: importer specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	bool peer2peer;
	const struct dma_buf_attach_ops *importer_ops;
	void *importer_priv;
	void *priv;
};

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name:	name of the exporter - useful for debugging.
 * @owner:	pointer to exporter module - used for refcounting kernel module
 * @ops:	Attach allocator-defined dma buf ops to the new buffer
 * @size:	Size of the buffer - invariant over the lifetime of the buffer
 * @flags:	mode flags for the file
 * @resv:	reservation-object, NULL to allocate default one
 * @priv:	Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct dma_resv *resv;
	void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					 .owner = THIS_MODULE }
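
/*
 * For illustration, a typical export sequence built on this macro. The
 * "my_ops" table and "my_buffer" pointer are hypothetical; error handling
 * is abbreviated:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_ops;
 *	exp_info.size = my_buffer->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);
 */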

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf:	[in]	pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers that
 * create additional references to the dmabuf on the kernel side. For
 * example, an exporter that needs to keep a dmabuf ptr so that subsequent
 * exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

/**
 * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
 * @dmabuf: the DMA-buf to check
 *
 * Returns true if a DMA-buf exporter wants to be called with the dma_resv
 * locked for the map/unmap callbacks, false if it doesn't want to be called
 * with the lock held.
 */
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
	return !!dmabuf->ops->pin;
}

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *attach);
int dma_buf_pin(struct dma_buf_attachment *attach);
void dma_buf_unpin(struct dma_buf_attachment *attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
				enum dma_data_direction direction);
void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
				       struct sg_table *sg_table,
				       enum dma_data_direction direction);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
struct dma_buf *dma_buf_iter_begin(void);
struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf);
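
/*
 * For illustration, a typical importer sequence using the unlocked helpers
 * declared above; "fd" is a file descriptor received from userspace and
 * "dev" is the importing device. Error handling is abbreviated:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach)) {
 *		dma_buf_put(dmabuf);
 *		return PTR_ERR(attach);
 *	}
 *
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *
 *	(program the device with the addresses in sgt here)
 *
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */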
#endif /* __DMA_BUF_H__ */