// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 */

#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

struct virtio_gpu_submit_post_dep {
        struct drm_syncobj *syncobj;
        struct dma_fence_chain *chain;
        u64 point;
};

struct virtio_gpu_submit {
        struct virtio_gpu_submit_post_dep *post_deps;
        unsigned int num_out_syncobjs;

        struct drm_syncobj **in_syncobjs;
        unsigned int num_in_syncobjs;

        struct virtio_gpu_object_array *buflist;
        struct drm_virtgpu_execbuffer *exbuf;
        struct virtio_gpu_fence *out_fence;
        struct virtio_gpu_fpriv *vfpriv;
        struct virtio_gpu_device *vgdev;
        struct sync_file *sync_file;
        struct drm_file *file;
        int out_fence_fd;
        u64 fence_ctx;
        u32 ring_idx;
        void *buf;
};

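/*
 * Skip the CPU-side wait when the in-fence belongs to this submission's
 * own fence context (fence_ctx + ring_idx): fences on the same ring
 * signal in submission order, so no explicit wait is needed.
 */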
static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
                                    struct dma_fence *in_fence)
{
        u64 context = submit->fence_ctx + submit->ring_idx;

        if (dma_fence_match_context(in_fence, context))
                return 0;

        return dma_fence_wait(in_fence, true);
}

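/*
 * The in-fence may be a fence container (e.g. a fence array backing a
 * sync_file); unwrap it and wait on each component fence individually.
 */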
static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
                                     struct dma_fence *fence)
{
        struct dma_fence_unwrap itr;
        struct dma_fence *f;
        int err;

        dma_fence_unwrap_for_each(f, &itr, fence) {
                err = virtio_gpu_do_fence_wait(submit, f);
                if (err)
                        return err;
        }

        return 0;
}

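/*
 * Drop the syncobj references taken by virtio_gpu_parse_deps(); entries
 * are NULL for descriptors that didn't request a post-wait reset.
 */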
static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
                                     u32 nr_syncobjs)
{
        u32 i = nr_syncobjs;

        while (i--) {
                if (syncobjs[i])
                        drm_syncobj_put(syncobjs[i]);
        }

        kvfree(syncobjs);
}

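/*
 * Process the userspace array of in-syncobj descriptors: wait for each
 * dependency fence and keep a reference to every syncobj that must be
 * reset once the job has been submitted.
 */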
static int
virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
{
        struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
        struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
        size_t syncobj_stride = exbuf->syncobj_stride;
        u32 num_in_syncobjs = exbuf->num_in_syncobjs;
        struct drm_syncobj **syncobjs;
        int ret = 0, i;

        if (!num_in_syncobjs)
                return 0;

        /*
         * kvmalloc() at first tries to allocate memory using kmalloc() and
         * falls back to vmalloc() only on failure. It also uses __GFP_NOWARN
         * internally for allocations larger than a page size, preventing a
         * storm of KMSG warnings.
         */
        syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
        if (!syncobjs)
                return -ENOMEM;

        for (i = 0; i < num_in_syncobjs; i++) {
                u64 address = exbuf->in_syncobjs + i * syncobj_stride;
                struct dma_fence *fence;

                memset(&syncobj_desc, 0, sizeof(syncobj_desc));

                if (copy_from_user(&syncobj_desc,
                                   u64_to_user_ptr(address),
                                   min(syncobj_stride, sizeof(syncobj_desc)))) {
                        ret = -EFAULT;
                        break;
                }

                if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
                        ret = -EINVAL;
                        break;
                }

                ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
                                             syncobj_desc.point, 0, &fence);
                if (ret)
                        break;

                ret = virtio_gpu_dma_fence_wait(submit, fence);

                dma_fence_put(fence);
                if (ret)
                        break;

                if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
                        syncobjs[i] = drm_syncobj_find(submit->file,
                                                       syncobj_desc.handle);
                        if (!syncobjs[i]) {
                                ret = -EINVAL;
                                break;
                        }
                }
        }

        if (ret) {
                virtio_gpu_free_syncobjs(syncobjs, i);
                return ret;
        }

        submit->num_in_syncobjs = num_in_syncobjs;
        submit->in_syncobjs = syncobjs;

        return ret;
}

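/*
 * Reset the in-syncobjs flagged with VIRTGPU_EXECBUF_SYNCOBJ_RESET by
 * replacing their fence with NULL.
 */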
static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
                                      u32 nr_syncobjs)
{
        u32 i;

        for (i = 0; i < nr_syncobjs; i++) {
                if (syncobjs[i])
                        drm_syncobj_replace_fence(syncobjs[i], NULL);
        }
}

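/*
 * Release the post-dependency state: any unused fence chain nodes and
 * the syncobj references taken by virtio_gpu_parse_post_deps().
 */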
static void
virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
                          u32 nr_syncobjs)
{
        u32 i = nr_syncobjs;

        while (i--) {
                kfree(post_deps[i].chain);
                drm_syncobj_put(post_deps[i].syncobj);
        }

        kvfree(post_deps);
}

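/*
 * Look up the out-syncobjs that will carry the job's out-fence. For
 * timeline points, the dma_fence_chain node is pre-allocated here so
 * that signalling after submission cannot fail on memory allocation.
 */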
static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
{
        struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
        struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
        struct virtio_gpu_submit_post_dep *post_deps;
        u32 num_out_syncobjs = exbuf->num_out_syncobjs;
        size_t syncobj_stride = exbuf->syncobj_stride;
        int ret = 0, i;

        if (!num_out_syncobjs)
                return 0;

        post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
        if (!post_deps)
                return -ENOMEM;

        for (i = 0; i < num_out_syncobjs; i++) {
                u64 address = exbuf->out_syncobjs + i * syncobj_stride;

                memset(&syncobj_desc, 0, sizeof(syncobj_desc));

                if (copy_from_user(&syncobj_desc,
                                   u64_to_user_ptr(address),
                                   min(syncobj_stride, sizeof(syncobj_desc)))) {
                        ret = -EFAULT;
                        break;
                }

                post_deps[i].point = syncobj_desc.point;

                if (syncobj_desc.flags) {
                        ret = -EINVAL;
                        break;
                }

                if (syncobj_desc.point) {
                        post_deps[i].chain = dma_fence_chain_alloc();
                        if (!post_deps[i].chain) {
                                ret = -ENOMEM;
                                break;
                        }
                }

                post_deps[i].syncobj = drm_syncobj_find(submit->file,
                                                        syncobj_desc.handle);
                if (!post_deps[i].syncobj) {
                        kfree(post_deps[i].chain);
                        ret = -EINVAL;
                        break;
                }
        }

        if (ret) {
                virtio_gpu_free_post_deps(post_deps, i);
                return ret;
        }

        submit->num_out_syncobjs = num_out_syncobjs;
        submit->post_deps = post_deps;

        return 0;
}

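/*
 * Attach the submission's out-fence to every out-syncobj: as a new
 * timeline point when a fence chain was pre-allocated, otherwise as a
 * plain replacement of the binary syncobj's fence.
 */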
static void
virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
{
        struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;

        if (post_deps) {
                struct dma_fence *fence = &submit->out_fence->f;
                u32 i;

                for (i = 0; i < submit->num_out_syncobjs; i++) {
                        if (post_deps[i].chain) {
                                drm_syncobj_add_point(post_deps[i].syncobj,
                                                      post_deps[i].chain,
                                                      fence, post_deps[i].point);
                                post_deps[i].chain = NULL;
                        } else {
                                drm_syncobj_replace_fence(post_deps[i].syncobj,
                                                          fence);
                        }
                }
        }
}

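/*
 * Reserve a DRM event that will be delivered on the file's event queue
 * when the out-fence signals.
 */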
static int virtio_gpu_fence_event_create(struct drm_device *dev,
                                         struct drm_file *file,
                                         struct virtio_gpu_fence *fence,
                                         u32 ring_idx)
{
        struct virtio_gpu_fence_event *e = NULL;
        int ret;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
        e->event.length = sizeof(e->event);

        ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
        if (ret) {
                kfree(e);
                return ret;
        }

        fence->e = e;

        return 0;
}

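/*
 * Copy the BO handle array from userspace and resolve it into a
 * virtio_gpu_object_array held for the duration of the submission.
 */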
static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
{
        struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
        u32 *bo_handles;

        if (!exbuf->num_bo_handles)
                return 0;

        bo_handles = kvmalloc_array(exbuf->num_bo_handles, sizeof(*bo_handles),
                                    GFP_KERNEL);
        if (!bo_handles)
                return -ENOMEM;

        if (copy_from_user(bo_handles, u64_to_user_ptr(exbuf->bo_handles),
                           exbuf->num_bo_handles * sizeof(*bo_handles))) {
                kvfree(bo_handles);
                return -EFAULT;
        }

        submit->buflist = virtio_gpu_array_from_handles(submit->file, bo_handles,
                                                        exbuf->num_bo_handles);
        if (!submit->buflist) {
                kvfree(bo_handles);
                return -ENOENT;
        }

        kvfree(bo_handles);

        return 0;
}

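/*
 * Undo everything the init and parse stages acquired. Runs on both the
 * success and error paths, so each release is guarded against state
 * that was never set up or was handed over by
 * virtio_gpu_complete_submit().
 */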
static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
{
        virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
        virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
        virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);

        if (!IS_ERR(submit->buf))
                kvfree(submit->buf);

        if (submit->buflist)
                virtio_gpu_array_put_free(submit->buflist);

        if (submit->out_fence_fd >= 0)
                put_unused_fd(submit->out_fence_fd);

        if (submit->out_fence)
                dma_fence_put(&submit->out_fence->f);

        if (submit->sync_file)
                fput(submit->sync_file->file);
}

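/* Queue the command buffer on the virtqueue and kick the host. */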
static void virtio_gpu_submit(struct virtio_gpu_submit *submit)
{
        virtio_gpu_cmd_submit(submit->vgdev, submit->buf, submit->exbuf->size,
                              submit->vfpriv->ctx_id, submit->buflist,
                              submit->out_fence);
        virtio_gpu_notify(submit->vgdev);
}

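/*
 * Mark the submission as handed over: clearing these fields stops
 * virtio_gpu_cleanup_submit() from releasing state that now belongs to
 * the in-flight virtio command or to userspace.
 */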
static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
{
        submit->buf = NULL;
        submit->buflist = NULL;
        submit->sync_file = NULL;
        submit->out_fence_fd = -1;
}

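/*
 * Build the submit state from the execbuffer args: allocate the
 * out-fence only when something will consume it, reserve the fence
 * event and sync_file fd if requested, and copy in the command buffer
 * and BO list.
 */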
static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
                                  struct drm_virtgpu_execbuffer *exbuf,
                                  struct drm_device *dev,
                                  struct drm_file *file,
                                  u64 fence_ctx, u32 ring_idx)
{
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fence *out_fence;
        bool drm_fence_event;
        int err;

        memset(submit, 0, sizeof(*submit));

        if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
            (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
                drm_fence_event = true;
        else
                drm_fence_event = false;

        if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
            exbuf->num_out_syncobjs ||
            exbuf->num_bo_handles ||
            drm_fence_event)
                out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
        else
                out_fence = NULL;

        if (drm_fence_event) {
                err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
                if (err) {
                        dma_fence_put(&out_fence->f);
                        return err;
                }
        }

        submit->out_fence = out_fence;
        submit->fence_ctx = fence_ctx;
        submit->ring_idx = ring_idx;
        submit->out_fence_fd = -1;
        submit->vfpriv = vfpriv;
        submit->vgdev = vgdev;
        submit->exbuf = exbuf;
        submit->file = file;

        err = virtio_gpu_init_submit_buflist(submit);
        if (err)
                return err;

        submit->buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
        if (IS_ERR(submit->buf))
                return PTR_ERR(submit->buf);

        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
                err = get_unused_fd_flags(O_CLOEXEC);
                if (err < 0)
                        return err;

                submit->out_fence_fd = err;

                submit->sync_file = sync_file_create(&out_fence->f);
                if (!submit->sync_file)
                        return -ENOMEM;
        }

        return 0;
}

static int virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit)
{
        int ret = 0;

        if (submit->exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
                struct dma_fence *in_fence =
                        sync_file_get_fence(submit->exbuf->fence_fd);
                if (!in_fence)
                        return -EINVAL;

                /*
                 * Wait if the fence is from a foreign context, or if the
                 * fence array contains any fence from a foreign context.
                 */
                ret = virtio_gpu_dma_fence_wait(submit, in_fence);

                dma_fence_put(in_fence);
        }

        return ret;
}

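/*
 * Publish the out-fence to userspace by installing the sync_file into
 * the fd reserved earlier; from this point the fd owns the sync_file.
 */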
static void virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit)
{
        if (submit->sync_file) {
                submit->exbuf->fence_fd = submit->out_fence_fd;
                fd_install(submit->out_fence_fd, submit->sync_file->file);
        }
}

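/* Lock the reservation objects of all BOs attached to the submission. */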
static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit)
{
        if (submit->buflist)
                return virtio_gpu_array_lock_resv(submit->buflist);

        return 0;
}

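/*
 * VIRTGPU_EXECBUFFER ioctl: validate the flags and ring index, build
 * the submit state, resolve and await dependencies, then push the job
 * to the host and publish the out-fence.
 */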
int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        u64 fence_ctx = vgdev->fence_drv.context;
        struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_submit submit;
        u32 ring_idx = 0;
        int ret = -EINVAL;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
                return ret;

        if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
                if (exbuf->ring_idx >= vfpriv->num_rings)
                        return ret;

                if (!vfpriv->base_fence_ctx)
                        return ret;

                fence_ctx = vfpriv->base_fence_ctx;
                ring_idx = exbuf->ring_idx;
        }

        virtio_gpu_create_context(dev, file);

        ret = virtio_gpu_init_submit(&submit, exbuf, dev, file,
                                     fence_ctx, ring_idx);
        if (ret)
                goto cleanup;

        ret = virtio_gpu_parse_post_deps(&submit);
        if (ret)
                goto cleanup;

        ret = virtio_gpu_parse_deps(&submit);
        if (ret)
                goto cleanup;

        /*
         * Await in-fences at the end of the job-submission path so that,
         * once the waits complete, we proceed directly to submitting to
         * virtio.
         */
        ret = virtio_gpu_wait_in_fence(&submit);
        if (ret)
                goto cleanup;

        ret = virtio_gpu_lock_buflist(&submit);
        if (ret)
                goto cleanup;

        virtio_gpu_submit(&submit);

        /*
         * Set up the user-visible outputs after submitting the job to
         * keep the submission path itself fast.
         */
        virtio_gpu_install_out_fence_fd(&submit);
        virtio_gpu_process_post_deps(&submit);
        virtio_gpu_complete_submit(&submit);
cleanup:
        virtio_gpu_cleanup_submit(&submit);

        return ret;
}