// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"

#include <linux/dma-mapping.h>
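
/*
 * Free callback for VRAM GEM objects: if the host resource was created,
 * unmap it from the host-visible region (when it is currently mapped) and
 * tell the host to drop the resource.
 */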
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	bool unmap;

	if (bo->created) {
		spin_lock(&vgdev->host_visible_lock);
		unmap = drm_mm_node_allocated(&vram->vram_node);
		spin_unlock(&vgdev->host_visible_lock);

		if (unmap)
			virtio_gpu_cmd_unmap(vgdev, bo);

		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
}
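
/* No fault handler: virtio_gpu_vram_mmap() maps the whole range up front. */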
static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
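
/*
 * mmap a host-visible (MAPPABLE blob) VRAM object. Wait until the host has
 * acknowledged the MAP command, then remap the reserved range of the
 * host-visible region into userspace with the caching mode reported by the
 * host.
 */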
static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
				struct vm_area_struct *vma)
{
	int ret;
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	unsigned long vm_size = vma->vm_end - vma->vm_start;
	unsigned long vm_end;

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return -EINVAL;

	wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
	if (vram->map_state != STATE_OK)
		return -EINVAL;

	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	vma->vm_ops = &virtio_gpu_vram_vm_ops;

	if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (check_add_overflow(vma->vm_pgoff << PAGE_SHIFT, vm_size, &vm_end))
		return -EINVAL;

	if (vm_end > vram->vram_node.size)
		return -EINVAL;

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 (vram->vram_node.start >> PAGE_SHIFT) + vma->vm_pgoff,
				 vm_size, vma->vm_page_prot);
	return ret;
}
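
/*
 * dma-buf map callback for VRAM objects. For mappable blobs the reserved
 * range is mapped with dma_map_resource() and returned as a single-entry
 * sg_table. Non-mappable blobs can only be shared with other virtio devices
 * via the resource UUID, so a stub sg_table is returned instead.
 */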
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
					     struct device *dev,
					     enum dma_data_direction dir)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	struct sg_table *sgt;
	dma_addr_t addr;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
		// Virtio devices can access the dma-buf via its UUID. Return a stub
		// sg_table so the dma-buf API still works.
		if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
			ret = -EIO;
			goto out;
		}
		return sgt;
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		goto out;

	addr = dma_map_resource(dev, vram->vram_node.start,
				vram->vram_node.size, dir,
				DMA_ATTR_SKIP_CPU_SYNC);
	ret = dma_mapping_error(dev, addr);
	if (ret)
		goto out;

	sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
	sg_dma_address(sgt->sgl) = addr;
	sg_dma_len(sgt->sgl) = vram->vram_node.size;

	return sgt;
out:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(ret);
}
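
/* dma-buf unmap callback: undo virtio_gpu_vram_map_dma_buf(). */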
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
				   struct sg_table *sgt,
				   enum dma_data_direction dir)
{
	if (sgt->nents) {
		dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
				   sg_dma_len(sgt->sgl), dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(sgt);
	kfree(sgt);
}
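
/* GEM object callbacks used for all VRAM (host-visible blob) objects. */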
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.free = virtio_gpu_vram_free,
	.mmap = virtio_gpu_vram_mmap,
	.export = virtgpu_gem_prime_export,
};
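
/* A GEM object is a VRAM object iff it uses the VRAM callbacks above. */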
bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}
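
/*
 * Reserve a range in the host-visible address space and ask the host to map
 * the blob's backing memory there. On failure the drm_mm node is released
 * again.
 */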
static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
{
	int ret;
	uint64_t offset;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

	if (!vgdev->has_host_visible)
		return -EINVAL;

	spin_lock(&vgdev->host_visible_lock);
	ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
				 bo->base.base.size);
	spin_unlock(&vgdev->host_visible_lock);

	if (ret)
		return ret;

	objs = virtio_gpu_array_alloc(1);
	if (!objs) {
		ret = -ENOMEM;
		goto err_remove_node;
	}

	virtio_gpu_array_add_obj(objs, &bo->base.base);
	/* TODO: Add an error checking helper function in drm_mm.h */
	offset = vram->vram_node.start - vgdev->host_visible_region.addr;

	ret = virtio_gpu_cmd_map(vgdev, objs, offset);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		goto err_remove_node;
	}

	return 0;

err_remove_node:
	spin_lock(&vgdev->host_visible_lock);
	drm_mm_remove_node(&vram->vram_node);
	spin_unlock(&vgdev->host_visible_lock);
	return ret;
}
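
/*
 * Create a VRAM (blob) object: allocate the GEM object and a resource id,
 * issue RESOURCE_CREATE_BLOB and, for mappable blobs, map the resource into
 * the host-visible region right away.
 */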
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_object_params *params,
			   struct virtio_gpu_object **bo_ptr)
{
	struct drm_gem_object *obj;
	struct virtio_gpu_object_vram *vram;
	int ret;

	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	obj = &vram->base.base.base;
	obj->funcs = &virtio_gpu_vram_funcs;

	params->size = PAGE_ALIGN(params->size);
	drm_gem_private_object_init(vgdev->ddev, obj, params->size);

	/* Create fake offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		kfree(vram);
		return ret;
	}

	ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
	if (ret) {
		kfree(vram);
		return ret;
	}

	virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
					    0);
	if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
		ret = virtio_gpu_vram_map(&vram->base);
		if (ret) {
			virtio_gpu_vram_free(obj);
			return ret;
		}
	}

	*bo_ptr = &vram->base;
	return 0;
}