/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>

#include "display/intel_fb.h"
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_tiling.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
#include "i915_vma_resource.h"
static inline void assert_vma_held_evict(const struct i915_vma *vma)
{
	/*
	 * We may be forced to unbind when the vm is dead, to clean it up.
	 * This is the only exception to the requirement of the object lock
	 * being held.
	 */
	if (kref_read(&vma->vm->ref))
		assert_object_held_shared(vma->obj);
}

static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}

static void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		drm_dbg(vma->obj->base.dev,
			"vma.node [%08llx + %08llx] %s: unknown owner\n",
			vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	drm_dbg(vma->obj->base.dev,
		"vma.node [%08llx + %08llx] %s: inserted at %s\n",
		vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	struct i915_vma *vma = active_to_vma(ref);

	if (!i915_vma_tryget(vma))
		return -ENOENT;

	/*
	 * Exclude global GTT VMA from holding a GT wakeref
	 * while active, otherwise GPU never goes idle.
	 */
	if (!i915_vma_is_ggtt(vma)) {
		/*
		 * Since we and our _retire() counterpart can be
		 * called asynchronously, storing a wakeref tracking
		 * handle inside struct i915_vma is not safe, and
		 * there is no other good place for that.  Hence,
		 * use untracked variants of intel_gt_pm_get/put().
		 */
		intel_gt_pm_get_untracked(vma->vm->gt);
	}

	return 0;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = active_to_vma(ref);

	if (!i915_vma_is_ggtt(vma)) {
		/*
		 * Since we can be called from atomic contexts,
		 * use an async variant of intel_gt_pm_put().
		 */
		intel_gt_pm_put_async_untracked(vma->vm->gt);
	}

	i915_vma_put(vma);
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_gtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int err;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);
	INIT_LIST_HEAD(&vma->obj_link);
	RB_CLEAR_NODE(&vma->obj_node);

	if (view && view->type != I915_GTT_VIEW_NORMAL) {
		vma->gtt_view = *view;
		if (view->type == I915_GTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	err = mutex_lock_interruptible(&vm->mutex);
	if (err) {
		pos = ERR_PTR(err);
		goto err_vma;
	}

	vma->vm = vm;
	list_add_tail(&vma->vm_link, &vm->unbound_list);

	spin_lock(&obj->vma.lock);
	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);
	mutex_unlock(&vm->mutex);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
	list_del_init(&vma->vm_link);
	mutex_unlock(&vm->mutex);
err_vma:
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_gtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_gtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!kref_read(&vm->ref));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
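
/*
 * A minimal usage sketch (illustrative only, not taken from this file,
 * error handling elided): callers typically look up the singleton vma
 * and then pin it into its address space, e.g.
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *
 * The PIN_GLOBAL flag shown is just one example; see the PIN_* definitions
 * for the full set accepted by the pinning API.
 */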
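
/*
 * Binding may be performed asynchronously via a dma_fence_work:
 * struct i915_vma_work packages everything __vma_bind() needs (the vma
 * resource, the page-table stash, pat_index and bind flags) so the actual
 * PTE setup can run from the fence-work callback once its dependencies
 * have signaled.
 */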
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma_resource *vma_res;
	struct drm_i915_gem_object *obj;
	struct i915_sw_dma_fence_cb cb;
	unsigned int pat_index;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma_resource *vma_res = vw->vma_res;

	/*
	 * We are about to bind the object, which must mean we have already
	 * signaled the work to potentially clear/move the pages underneath. If
	 * something went wrong at that stage then the object should have
	 * unknown_state set, in which case we need to skip the bind.
	 */
	if (i915_gem_object_has_unknown_state(vw->obj))
		return;

	vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
			       vma_res, vw->pat_index, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->obj)
		i915_gem_object_put(vw->obj);

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	if (vw->vma_res)
		i915_vma_resource_put(vw->vma_res);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

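/*
 * Wait (interruptibly) for any pending asynchronous bind of this vma to
 * complete, by waiting on the exclusive fence tracked in vma->active.
 * Returns 0 immediately if no bind is in flight.
 */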
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
			       i915_gem_object_is_lmem(obj), obj->mm.region,
			       vma->ops, vma->private, __i915_vma_offset(vma),
			       __i915_vma_size(vma), vma->size, vma->guard);
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @pat_index: PAT index to set in PTE
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 * consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  unsigned int pat_index,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > i915_vma_size(vma));

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total))) {
		i915_vma_resource_free(vma_res);
		return -ENODEV;
	}

	if (GEM_DEBUG_WARN_ON(!flags)) {
		i915_vma_resource_free(vma_res);
		return -EINVAL;
	}

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0) {
		i915_vma_resource_free(vma_res);
		return 0;
	}

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	/* Wait for or await async unbinds touching our range */
	if (work && bind_flags & vma->vm->bind_async_flags)
		ret = i915_vma_resource_bind_dep_await(vma->vm,
						       &work->base.chain,
						       vma->node.start,
						       vma->node.size,
						       true,
						       GFP_NOWAIT |
						       __GFP_RETRY_MAYFAIL |
						       __GFP_NOWARN);
	else
		ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
						      vma->node.size, true);
	if (ret) {
		i915_vma_resource_free(vma_res);
		return ret;
	}

	if (vma->resource || !vma_res) {
		/* Rebinding with an additional I915_VMA_*_BIND */
		GEM_WARN_ON(!vma_flags);
		i915_vma_resource_free(vma_res);
	} else {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
	}
	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma_res = i915_vma_resource_get(vma->resource);
		work->pat_index = pat_index;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */
		work->obj = i915_gem_object_get(vma->obj);
	} else {
		ret = i915_gem_object_wait_moving_fence(vma->obj, true);
		if (ret) {
			i915_vma_resource_free(vma->resource);
			vma->resource = NULL;

			return ret;
		}
		vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
				   bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

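/*
 * Map a GGTT-bound vma for CPU access: through the mappable aperture when
 * the vma is map-and-fenceable, via an lmem I/O mapping for local memory,
 * or via a WC object mapping otherwise. The mapping is cached in
 * vma->iomap and kept until unbind/evict, not dropped on unpin.
 */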
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return IOMEM_ERR_PTR(-EINVAL);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj)) {
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		} else if (i915_vma_is_map_and_fenceable(vma)) {
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						i915_vma_offset(vma),
						i915_vma_size(vma));
		} else {
			ptr = (void __iomem *)
				i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
			if (IS_ERR(ptr)) {
				err = PTR_ERR(ptr);
				goto err;
			}
			ptr = page_pack_bits(ptr, 1);
		}

		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			if (page_unmask_bits(ptr))
				__i915_gem_object_release_map(vma->obj);
			else
				io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return page_mask_bits(ptr);

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IOMEM_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	/* XXX We keep the mapping until __i915_vma_unbind()/evict() */

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

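/*
 * Check whether the vma's current binding satisfies the given placement
 * constraints (size, alignment and PIN_* placement flags). A misplaced
 * vma must be unbound and rebound before it can be used at the requested
 * location.
 */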
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (i915_vma_size(vma) < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_GUARD &&
	    vma->guard < (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

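/*
 * Recompute whether this GGTT vma is both CPU-mappable through the aperture
 * and placed/sized suitably for a fence register, and update
 * I915_VMA_CAN_FENCE_BIT accordingly.
 */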
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (i915_vma_size(vma) >= vma->fence_size &&
		     IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));

	mappable = i915_ggtt_offset(vma) + vma->fence_size <=
		   i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @ww: An optional struct i915_gem_ww_ctx
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags)
{
	unsigned long color, guard;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);

	size = max(size, vma->size);
	alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	guard = vma->guard; /* retain guard across rebinds */
	if (flags & PIN_OFFSET_GUARD) {
		GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
		guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
	}
	/*
	 * As we align the node upon insertion, but the hardware gets
	 * node.start + guard, the easiest way to make that work is
	 * to make the guard a multiple of the alignment size.
	 */
	guard = ALIGN(guard, alignment);

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));

	/*
	 * If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end - 2 * guard) {
		drm_dbg(vma->obj->base.dev,
			"Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
		return -ENOSPC;
	}

	color = 0;

	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->pat_index;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;
		/*
		 * The caller knows not of the guard added by others and
		 * requests for the offset of the start of its buffer
		 * to be fixed, which may not be the same as the position
		 * of the vma->node due to the guard pages.
		 */
		if (offset < guard || offset + size > end - guard)
			return -ENOSPC;

		ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
					   size + 2 * guard,
					   offset - guard,
					   color, flags);
		if (ret)
			return ret;
	} else {
		size += 2 * guard;
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
		    !HAS_64K_PAGES(vma->vm->i915)) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	vma->guard = guard;

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
}

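/*
 * Opportunistic ("quick and dirty") pin: try to take an extra pin count on
 * an already-bound vma without touching the vm->mutex. Returns false if the
 * requested bind flags are not all set, or if the vma is in overflow/error
 * state, leaving the caller to fall back to the slower, locked path.
 */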
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;

	bound = atomic_read(&vma->flags);

	if (flags & PIN_VALIDATE) {
		flags &= I915_VMA_BIND_MASK;

		return (flags & bound) == flags;
	}

	/* with the lock mandatory for unbind, we don't race here */
	flags &= I915_VMA_BIND_MASK;
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;
}

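/*
 * rotate_pages() walks the object column by column, emitting one page per
 * row from the bottom row upwards, which yields a 90-degree rotated layout
 * in the GTT. As an illustrative example (not from the original source):
 * for a 2x2 tile grid with src_stride = 2 and offset = 0, the pages are
 * emitted in the order 2, 0, 3, 1.
 */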
static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	pgoff_t src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static struct scatterlist *
add_padding_pages(unsigned int count,
		  struct sg_table *st, struct scatterlist *sg)
{
	st->nents++;

	/*
	 * The DE ignores the PTEs for the padding tiles, the sg entry
	 * here is just a convenience to indicate how many padding PTEs
	 * to insert at this spot.
	 */
	sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
	sg_dma_address(sg) = 0;
	sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
	sg = sg_next(sg);

	return sg;
}

static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
			      unsigned long offset, unsigned int alignment_pad,
			      unsigned int width, unsigned int height,
			      unsigned int src_stride, unsigned int dst_stride,
			      struct sg_table *st, struct scatterlist *sg,
			      unsigned int *gtt_offset)
{
	unsigned int row;

	if (!width || !height)
		return sg;

	if (alignment_pad)
		sg = add_padding_pages(alignment_pad, st, sg);

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */

			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
	}

	*gtt_offset += alignment_pad + dst_stride * height;

	return sg;
}

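/*
 * Copy the DMA addresses for @count pages starting at @obj_offset from the
 * object's backing sg_table into @st, clipping source entries as needed so
 * the new list describes exactly the requested range.
 */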
static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
		       pgoff_t obj_offset,
		       unsigned int count,
		       struct sg_table *st, struct scatterlist *sg)
{
	struct scatterlist *iter;
	unsigned int offset;

	iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
	GEM_BUG_ON(!iter);

	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0)
			return sg;

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);
}

static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
			       pgoff_t obj_offset, unsigned int alignment_pad,
			       unsigned int size,
			       struct sg_table *st, struct scatterlist *sg,
			       unsigned int *gtt_offset)
{
	if (!size)
		return sg;

	if (alignment_pad)
		sg = add_padding_pages(alignment_pad, st, sg);

	sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
	sg = sg_next(sg);

	*gtt_offset += alignment_pad + size;

	return sg;
}

static struct scatterlist *
remap_color_plane_pages(const struct intel_remapped_info *rem_info,
			struct drm_i915_gem_object *obj,
			int color_plane,
			struct sg_table *st, struct scatterlist *sg,
			unsigned int *gtt_offset)
{
	unsigned int alignment_pad = 0;

	if (rem_info->plane_alignment)
		alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;

	if (rem_info->plane[color_plane].linear)
		sg = remap_linear_color_plane_pages(obj,
						    rem_info->plane[color_plane].offset,
						    alignment_pad,
						    rem_info->plane[color_plane].size,
						    st, sg,
						    gtt_offset);

	else
		sg = remap_tiled_color_plane_pages(obj,
						   rem_info->plane[color_plane].offset,
						   alignment_pad,
						   rem_info->plane[color_plane].width,
						   rem_info->plane[color_plane].height,
						   rem_info->plane[color_plane].src_stride,
						   rem_info->plane[color_plane].dst_stride,
						   st, sg,
						   gtt_offset);

	return sg;
}

static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int gtt_offset = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++)
		sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}

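/*
 * Build an sg_table covering only the pages of a partial view: a window of
 * view->partial.size pages starting at view->partial.offset within the
 * object, used e.g. when faulting in part of a large object through the
 * mappable aperture.
 */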
static noinline struct sg_table *
intel_partial_pages(const struct i915_gtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int count = view->partial.size;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;

	sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);

	sg_mark_end(sg);
	i915_sg_trim(st);	/* Drop any unused tail entries. */

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

static int
__i915_vma_get_pages(struct i915_vma *vma)
{
	struct sg_table *pages;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->gtt_view.type) {
	default:
		GEM_BUG_ON(vma->gtt_view.type);
		fallthrough;
	case I915_GTT_VIEW_NORMAL:
		pages = vma->obj->mm.pages;
		break;

	case I915_GTT_VIEW_ROTATED:
		pages =
			intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
		break;

	case I915_GTT_VIEW_REMAPPED:
		pages =
			intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
		break;

	case I915_GTT_VIEW_PARTIAL:
		pages = intel_partial_pages(&vma->gtt_view, vma->obj);
		break;
	}

	if (IS_ERR(pages)) {
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%ld)!\n",
			vma->gtt_view.type, PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	vma->pages = pages;

	return 0;
}
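
/*
 * Illustrative sketch (an editor's addition, not part of the original file):
 * a caller can request one of the special views above by filling in a
 * struct i915_gtt_view before pinning. The GTT fault handler builds a
 * partial view much like this (see compute_partial_view() in
 * gem/i915_gem_mman.c); the concrete values below are invented for the
 * example:
 *
 *	struct i915_gtt_view view = {
 *		.type = I915_GTT_VIEW_PARTIAL,
 *		.partial.offset = 0,	// first page of the chunk to map
 *		.partial.size = 16,	// length of the chunk, in pages
 *	};
 *
 *	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, PIN_MAPPABLE);
 */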

I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
{
	int err;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	err = i915_gem_object_pin_pages(vma->obj);
	if (err)
		return err;

	err = __i915_vma_get_pages(vma);
	if (err)
		goto err_unpin;

	vma->page_sizes = vma->obj->mm.page_sizes;
	atomic_inc(&vma->pages_count);

	return 0;

err_unpin:
	__i915_gem_object_unpin_pages(vma->obj);

	return err;
}
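
/*
 * Editor's note (not in the original): i915_vma_get_pages() and
 * i915_vma_put_pages() must be balanced. Only the first get pins the
 * object's backing store and (for the special views) builds vma->pages;
 * the atomic_add_unless() fast path above merely takes another reference
 * when one already exists, so repeated gets are cheap.
 */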

void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
{
	struct intel_gt *gt;
	int id;

	if (!tlb)
		return;

	/*
	 * Before we release the pages that were bound by this vma, we
	 * must invalidate all the TLBs that may still have a reference
	 * back to our physical address. It only needs to be done once,
	 * so after updating the PTE to point away from the pages, record
	 * the most recent TLB invalidation seqno, and if we have not yet
	 * flushed the TLBs upon release, perform a full invalidation.
	 */
	for_each_gt(gt, vm->i915, id)
		WRITE_ONCE(tlb[id],
			   intel_gt_next_invalidate_tlb_full(gt));
}
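
/*
 * Editor's note (not in the original): the seqnos recorded above are meant
 * to be consumed when the backing pages are finally released. The release
 * path is expected to compare each stored seqno against the GT's current
 * state and, if the invalidation is still outstanding, perform it, e.g. via
 * intel_gt_invalidate_tlb_full(gt, seqno) from gt/intel_tlb.c, before the
 * pages can be reused.
 */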

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);

	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		if (vma->pages != vma->obj->mm.pages) {
			sg_free_table(vma->pages);
			kfree(vma->pages);
		}
		vma->pages = NULL;

		i915_gem_object_unpin_pages(vma->obj);
	}
}

I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}
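
/*
 * Worked example (an editor's addition): pages_count packs two counters into
 * one atomic. Bits below I915_VMA_PAGES_BIAS count page references, bits at
 * and above it count bindings, and each successful bind adds
 * I915_VMA_PAGES_ACTIVE, i.e. one binding plus the page reference it holds.
 * Assuming I915_VMA_PAGES_BIAS == 24 and I915_VMA_PAGES_ACTIVE ==
 * (BIT(24) | 1), as defined in i915_vma.h:
 *
 *	initial state                     0x0000001  after i915_vma_get_pages()
 *	bind #1 (+I915_VMA_PAGES_ACTIVE)  0x1000002  one binding, two page refs
 *	bind #2 (+I915_VMA_PAGES_ACTIVE)  0x2000003  two bindings, three refs
 *	vma_unbind_pages()                0x0000001  both bindings and their
 *	                                             page references dropped
 */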

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	struct dma_fence *moving = NULL;
	struct i915_vma_resource *vma_res = NULL;
	intel_wakeref_t wakeref;
	unsigned int bound;
	int err;

	assert_vma_held(vma);
	GEM_BUG_ON(!ww);

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags))
		return 0;

	err = i915_vma_get_pages(vma);
	if (err)
		return err;

	/*
	 * In case of a global GTT, we must hold a runtime-pm wakeref
	 * while global PTEs are updated.  In other cases, we hold
	 * the rpm reference while the VMA is active.  Since runtime
	 * resume may require allocations, which are forbidden inside
	 * vm->mutex, get the first rpm wakeref outside of the mutex.
	 */
	wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	if (flags & vma->vm->bind_async_flags) {
		/* lock VM */
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = vma->vm;

		err = i915_gem_object_get_moving_fence(vma->obj, &moving);
		if (err)
			goto err_rpm;

		dma_fence_work_chain(&work->base, moving);

		/* Allocate enough page directories to cover all the used PTEs */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res)) {
		err = PTR_ERR(vma_res);
		goto err_fence;
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them in the mmu_notifier
	 * callbacks - but they are allowed to be part of the user ppGTT,
	 * which can never be mapped. As such we try to give the distinct
	 * users of the same mutex distinct lockclasses [equivalent to how
	 * we keep i915_ggtt and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_vma_res;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		if (!(flags & PIN_VALIDATE))
			__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, ww, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj->pat_index,
			    flags, work, vma_res);
	vma_res = NULL;
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	if (!(flags & PIN_VALIDATE)) {
		__i915_vma_pin(vma);
		GEM_BUG_ON(!i915_vma_is_pinned(vma));
	}
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_vma_res:
	i915_vma_resource_free(vma_res);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

	if (moving)
		dma_fence_put(moving);

	i915_vma_put_pages(vma);
	return err;
}

int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}
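
/*
 * Editor's note (illustrative, not in the original): the open-coded retry
 * above is the standard ww-mutex backoff idiom. Written with the
 * for_i915_gem_ww() helper used later in this file, an equivalent sketch is:
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(vma->obj, &ww);
 *		if (!err)
 *			err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
 *	}
 *
 * On -EDEADLK the helper backs off, dropping every lock in the acquire
 * context, and retries the body, exactly as the explicit goto does here.
 */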

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
			   u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	struct intel_gt *gt;
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	int err;

	do {
		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);

		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
			flush_idle_contexts(gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			/*
			 * We pass NULL ww here, as we don't want to unbind
			 * locked objects when called from execbuf when pinning
			 * is removed. This would probably regress badly.
			 */
			i915_gem_evict_vm(vm, NULL, NULL);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_gem_ww_ctx _ww;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	if (ww)
		return __i915_ggtt_pin(vma, ww, align, flags);

	lockdep_assert_not_held(&vma->obj->base.resv->lock.base);

	for_i915_gem_ww(&_ww, err, true) {
		err = i915_gem_object_lock(vma->obj, &_ww);
		if (!err)
			err = __i915_ggtt_pin(vma, &_ww, align, flags);
	}

	return err;
}

/**
 * i915_ggtt_clear_scanout - Clear scanout flag for all of the object's GGTT vmas
 * @obj: i915 GEM object
 *
 * This function clears the scanout flag for all of the object's GGTT VMAs.
 * The flag is set when an object is pinned for display use, and this
 * function, which clears it everywhere, is intended to be called by the
 * frontbuffer tracking code when the frontbuffer is about to be released.
 */
void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	spin_lock(&obj->vma.lock);
	for_each_ggtt_vma(vma, obj) {
		i915_vma_clear_scanout(vma);
		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
	}
	spin_unlock(&obj->vma.lock);
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	list_del_init(&vma->closed_link);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);
}

static void force_unbind(struct i915_vma *vma)
{
	if (!drm_mm_node_allocated(&vma->node))
		return;

	atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
	WARN_ON(__i915_vma_unbind(vma));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
}

static void release_references(struct i915_vma *vma, struct intel_gt *gt,
			       bool vm_ddestroy)
{
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(i915_vma_is_active(vma));

	spin_lock(&obj->vma.lock);
	list_del(&vma->obj_link);
	if (!RB_EMPTY_NODE(&vma->obj_node))
		rb_erase(&vma->obj_node, &obj->vma.tree);

	spin_unlock(&obj->vma.lock);

	spin_lock_irq(&gt->closed_lock);
	__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);

	if (vm_ddestroy)
		i915_vm_resv_put(vma->vm);

	i915_active_fini(&vma->active);
	GEM_WARN_ON(vma->resource);
	i915_vma_free(vma);
}

/*
 * i915_vma_destroy_locked - Remove all weak references to the vma and put
 * the initial reference.
 *
 * This function should be called when it's decided the vma isn't needed
 * anymore. The caller must assure that it doesn't race with another lookup
 * plus destroy, typically by taking an appropriate reference.
 *
 * Current callsites are
 * - __i915_gem_object_pages_fini()
 * - __i915_vm_close() - Blocks the above function by taking a reference on
 *   the object.
 * - __i915_vma_parked() - Blocks the above functions by taking a reference
 *   on the vm and a reference on the object. Also takes the object lock so
 *   destruction from __i915_vma_parked() can be blocked by holding the
 *   object lock. Since the object lock is only allowed from within i915 with
 *   an object refcount, holding the object lock also implicitly blocks the
 *   vma freeing from __i915_gem_object_pages_fini().
 *
 * Because of locks taken during destruction, a vma is also guaranteed to
 * stay alive while the following locks are held if it was looked up while
 * holding one of the locks:
 * - vm->mutex
 * - obj->vma.lock
 * - gt->closed_lock
 */
void i915_vma_destroy_locked(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->mutex);

	force_unbind(vma);
	list_del_init(&vma->vm_link);
	release_references(vma, vma->vm->gt, false);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	struct intel_gt *gt;
	bool vm_ddestroy;

	mutex_lock(&vma->vm->mutex);
	force_unbind(vma);
	list_del_init(&vma->vm_link);
	vm_ddestroy = vma->vm_ddestroy;
	vma->vm_ddestroy = false;

	/* vma->vm may be freed when releasing vma->vm->mutex. */
	gt = vma->vm->gt;
	mutex_unlock(&vma->vm->mutex);
	release_references(vma, gt, vm_ddestroy);
}
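
/*
 * Illustrative sketch (an editor's addition): per the guarantees documented
 * above, a vma found under one of the listed locks cannot be freed for the
 * duration of that critical section, e.g.:
 *
 *	spin_lock(&obj->vma.lock);
 *	vma = lookup_vma(obj, vm);	// hypothetical lookup helper
 *	if (vma)
 *		...			// safe to dereference until the unlock
 *	spin_unlock(&obj->vma.lock);
 *
 * lookup_vma() is made up for the example; the driver's real lookup,
 * i915_vma_instance(), takes obj->vma.lock internally.
 */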

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryget(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		if (i915_gem_object_trylock(obj, NULL)) {
			INIT_LIST_HEAD(&vma->closed_link);
			i915_vma_destroy(vma);
			i915_gem_object_unlock(obj);
		} else {
			/* back you go.. */
			spin_lock_irq(&gt->closed_lock);
			list_add(&vma->closed_link, &gt->closed_vma);
			spin_unlock_irq(&gt->closed_lock);
		}

		i915_gem_object_put(obj);
		i915_vm_put(vm);
	}
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	if (page_unmask_bits(vma->iomap))
		__i915_gem_object_release_map(vma->obj);
	else
		io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}

static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	/* Wait for the vma to be bound before we start! */
	err = __i915_request_await_bind(rq, vma);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	GEM_BUG_ON(!vma->pages);

	if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) {
		err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
		if (unlikely(err))
			return err;
	}
	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	/*
	 * Reserve fence slots early to prevent an allocation after preparing
	 * the workload and associating fences with dma_resv.
	 */
	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
		struct dma_fence *curr;
		int idx;

		/* The empty loop body only counts the fences in the array. */
		dma_fence_array_for_each(curr, idx, fence)
			;
		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
		if (unlikely(err))
			return err;
	}

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = i915_gem_object_get_frontbuffer(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}
	}

	if (fence) {
		struct dma_fence *curr;
		enum dma_resv_usage usage;
		int idx;

		if (flags & EXEC_OBJECT_WRITE) {
			usage = DMA_RESV_USAGE_WRITE;
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
		} else {
			usage = DMA_RESV_USAGE_READ;
			obj->write_domain = 0;
		}

		dma_fence_array_for_each(curr, idx, fence)
			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}
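
/*
 * Usage sketch (an editor's addition, not part of the original file): most
 * callers reach this through the i915_vma_move_to_active() wrapper in
 * i915_vma.h, which passes the request's own fence. A typical sequence when
 * building a request, with error unwinding elided, might look like:
 *
 *	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 *	if (err)
 *		return err;
 *
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	i915_vma_unpin(vma);	// the request now keeps the vma alive
 *
 * The flag combination shown is just one plausible choice.
 */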

struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
	struct i915_vma_resource *vma_res = vma->resource;
	struct dma_fence *unbind_fence;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	assert_vma_held_evict(vma);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}

	__i915_vma_iounmap(vma);

	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	/* Object backend must be async capable. */
	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);

	/* If vm is not open, unbind is a nop. */
	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
		kref_read(&vma->vm->ref);
	vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
		vma->vm->skip_pte_rewrite;
	trace_i915_vma_unbind(vma);

	if (async)
		unbind_fence = i915_vma_resource_unbind(vma_res,
							vma->obj->mm.tlb);
	else
		unbind_fence = i915_vma_resource_unbind(vma_res, NULL);

	vma->resource = NULL;

	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);

	if (!async) {
		if (unbind_fence) {
			dma_fence_wait(unbind_fence, false);
			dma_fence_put(unbind_fence);
			unbind_fence = NULL;
		}
		vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
	}

	/*
	 * Binding itself may not have completed until the unbind fence signals,
	 * so don't drop the pages until that happens, unless the resource is
	 * async_capable.
	 */

	vma_unbind_pages(vma);
	return unbind_fence;
}
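
/*
 * Editor's note (illustrative, not in the original): the two eviction modes
 * above differ in who waits for the unbind:
 *
 *	sync:	fence = __i915_vma_evict(vma, false);
 *		// any unbind fence has already been waited on and NULL is
 *		// returned; TLBs were invalidated inline via
 *		// vma_invalidate_tlb()
 *
 *	async:	fence = __i915_vma_evict(vma, true);
 *		// the caller must publish the fence, as
 *		// i915_vma_unbind_async() below does with
 *		// dma_resv_add_fence(), so the pages are held until the
 *		// unbind completes; TLB invalidation is deferred through the
 *		// seqno stored in obj->mm.tlb
 */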

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	assert_vma_held_evict(vma);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma, false);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
{
	struct dma_fence *fence;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return NULL;

	if (i915_vma_is_pinned(vma) ||
	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
		return ERR_PTR(-EAGAIN);

	/*
	 * We probably need to replace this with awaiting the fences of the
	 * object's dma_resv when the vma active goes away. When doing that
	 * we need to be careful to not add the vma_resource unbind fence
	 * immediately to the object's dma_resv, because then unbinding
	 * the next vma from the object, in case there are many, will
	 * actually await the unbinding of the previous vmas, which is
	 * undesirable.
	 */
	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
				       I915_ACTIVE_AWAIT_EXCL |
				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
		return ERR_PTR(-EBUSY);
	}

	fence = __i915_vma_evict(vma, true);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

	return fence;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = NULL;
	int err;

	assert_object_held_shared(vma->obj);

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = NULL;
	struct dma_fence *fence;
	int err;

	/*
	 * We need the dma-resv lock since we add the
	 * unbind fence to the dma-resv object.
	 */
	assert_object_held(obj);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (!obj->mm.rsgt)
		return -EBUSY;

	err = dma_resv_reserve_fences(obj->base.resv, 2);
	if (err)
		return -EBUSY;

	/*
	 * It would be great if we could grab this wakeref from the
	 * async unbind work if needed, but we can't because it uses
	 * kmalloc and it's in the dma-fence signalling critical path.
	 */
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	if (trylock_vm && !mutex_trylock(&vm->mutex)) {
		err = -EBUSY;
		goto out_rpm;
	} else if (!trylock_vm) {
		err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
		if (err)
			goto out_rpm;
	}

	fence = __i915_vma_unbind_async(vma);
	mutex_unlock(&vm->mutex);
	if (IS_ERR_OR_NULL(fence)) {
		err = PTR_ERR_OR_ZERO(fence);
		goto out_rpm;
	}

	dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
	dma_fence_put(fence);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

int i915_vma_unbind_unlocked(struct i915_vma *vma)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_vma_unbind(vma);
	i915_gem_object_unlock(vma->obj);

	return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

void i915_vma_module_exit(void)
{
	kmem_cache_destroy(slab_vmas);
}

int __init i915_vma_module_init(void)
{
	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!slab_vmas)
		return -ENOMEM;

	return 0;
}
