/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_file.h>
#include <drm/drm_managed.h>

#include "virtgpu_drv.h"

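/*
 * Worker for virtio config-change notifications: re-read the events_read
 * field from the config space, refresh EDID and display info when a
 * display event is pending (and signal a DRM hotplug event), then
 * acknowledge the event by writing it back via events_clear.
 */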
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->num_scanouts) {
			if (vgdev->has_edid)
				virtio_gpu_cmd_get_edids(vgdev);
			virtio_gpu_cmd_get_display_info(vgdev);
			virtio_gpu_notify(vgdev);
			drm_helper_hpd_irq_event(vgdev->ddev);
		}
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
			 events_clear, &events_clear);
}

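/* Initialize one queue's lock, ack waitqueue and dequeue worker. */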
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

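/*
 * Query the host for each advertised capability set. Each request gets
 * up to five seconds for the response to fill in the capset id; on a
 * timeout, or on an id outside the range the spec allows, the whole
 * capsets array is dropped again.
 */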
static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;
	bool invalid_capset_id = false;
	struct drm_device *drm = vgdev->ddev;

	vgdev->capsets = drmm_kcalloc(drm, num_capsets,
				      sizeof(struct virtio_gpu_drv_capset),
				      GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		virtio_gpu_notify(vgdev);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		/*
		 * Capability ids are defined in the virtio-gpu spec and are
		 * between 1 and 63, inclusive.
		 */
		if (!vgdev->capsets[i].id ||
		    vgdev->capsets[i].id > MAX_CAPSET_ID)
			invalid_capset_id = true;

		if (ret == 0)
			DRM_ERROR("timed out waiting for cap set %d\n", i);
		else if (invalid_capset_id)
			DRM_ERROR("invalid capset id %u", vgdev->capsets[i].id);

		if (ret == 0 || invalid_capset_id) {
			spin_lock(&vgdev->display_info_lock);
			drmm_kfree(drm, vgdev->capsets);
			vgdev->capsets = NULL;
			spin_unlock(&vgdev->display_info_lock);
			return;
		}

		vgdev->capset_id_mask |= 1 << vgdev->capsets[i].id;
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}

	vgdev->num_capsets = num_capsets;
}

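/*
 * Probe-time initialization: allocate and set up the per-device state,
 * detect optional device features, create the control and cursor
 * virtqueues, read the scanout and capset counts from the config space,
 * initialize modesetting and kick off the initial display-info queries.
 */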
int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
{
	struct virtqueue_info vqs_info[] = {
		{ "control", virtio_gpu_ctrl_ack },
		{ "cursor", virtio_gpu_cursor_ack },
	};
	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret = 0;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = drmm_kzalloc(dev, sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = vdev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->resource_export_lock);
	spin_lock_init(&vgdev->host_visible_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

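	/*
	 * Feature detection. Note that virgl 3D support is only taken into
	 * account on little-endian builds; the remaining flags simply mirror
	 * what the device advertises.
	 */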
#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
		vgdev->has_edid = true;

	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
		vgdev->has_indirect = true;

	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
		vgdev->has_resource_assign_uuid = true;

	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
		vgdev->has_resource_blob = true;

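	/*
	 * If the device exposes a host-visible shared memory region, reserve
	 * it against other users and put a drm_mm range manager on top of it
	 * so parts of the window can be handed out later.
	 */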
	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
		if (!devm_request_mem_region(&vgdev->vdev->dev,
					     vgdev->host_visible_region.addr,
					     vgdev->host_visible_region.len,
					     dev_name(&vgdev->vdev->dev))) {
			DRM_ERROR("Could not reserve host visible region\n");
			ret = -EBUSY;
			goto err_vqs;
		}

		DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
			 (unsigned long)vgdev->host_visible_region.addr,
			 (unsigned long)vgdev->host_visible_region.len);
		vgdev->has_host_visible = true;
		drm_mm_init(&vgdev->host_visible_mm,
			    (unsigned long)vgdev->host_visible_region.addr,
			    (unsigned long)vgdev->host_visible_region.len);
	}

	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
		vgdev->has_context_init = true;

	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
		 vgdev->has_virgl_3d ? '+' : '-',
		 vgdev->has_edid ? '+' : '-',
		 vgdev->has_resource_blob ? '+' : '-',
		 vgdev->has_host_visible ? '+' : '-');

	DRM_INFO("features: %ccontext_init\n",
		 vgdev->has_context_init ? '+' : '-');

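	/*
	 * Two virtqueues, as declared in vqs_info above: "control" for
	 * command submission and "cursor" for cursor updates.
	 */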
	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, vqs_info, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* get display info */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);

	if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) {
		DRM_INFO("KMS disabled\n");
		vgdev->num_scanouts = 0;
		vgdev->has_edid = false;
		dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
	} else {
		DRM_INFO("number of scanouts: %d\n", num_scanouts);
	}

	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret) {
		DRM_ERROR("modeset init failed\n");
		goto err_scanouts;
	}

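	/*
	 * virtio_device_ready() lets the device go live; after that, fetch
	 * the capability sets and the initial display configuration, waiting
	 * briefly for the display info response before returning.
	 */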
	virtio_device_ready(vgdev->vdev);

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->num_scanouts) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
				   5 * HZ);
	}
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	dev->dev_private = NULL;
	return ret;
}

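/* Free all cached capability-set responses on the cap_cache list. */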
static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

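/*
 * Device teardown: flush every worker that may still touch the device,
 * then reset it and delete the virtqueues.
 */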
void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	virtio_reset_device(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}

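/*
 * Final drm_device release: undo modesetting, free the vbufs and the
 * capset cache, and take down the host-visible range manager if it was
 * set up.
 */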
void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	if (!vgdev)
		return;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);

	if (vgdev->has_host_visible)
		drm_mm_takedown(&vgdev->host_visible_mm);
}

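/*
 * Per-open state, only needed when the host offers 3D (virgl): a context
 * id is reserved here, while the host-side context itself is created
 * later (see the context_created check in the postclose path).
 */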
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int handle;

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	mutex_init(&vfpriv->context_lock);

	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
	if (handle < 0) {
		kfree(vfpriv);
		return handle;
	}

	vfpriv->ctx_id = handle + 1;
	file->driver_priv = vfpriv;
	return 0;
}

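/*
 * Per-open teardown: destroy the host context if one was created, then
 * return the context id and free the per-file state.
 */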
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	if (!vgdev->has_virgl_3d)
		return;

	if (vfpriv->context_created) {
		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
		virtio_gpu_notify(vgdev);
	}

	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
	mutex_destroy(&vfpriv->context_lock);
	kfree(vfpriv);
	file->driver_priv = NULL;
}
|---|