// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2015 Intel Corporation
 */

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include "gt/intel_gt_requests.h"
#include "gt/intel_gt.h"

#include "i915_trace.h"

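/* Any free swap space left that shrinkable pages could be written out to? */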
static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

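/*
 * Translate the I915_SHRINK_* request into unbind flags and try to unbind
 * the object from its VMAs; returns true if the object was fully unbound
 * and its backing pages may now be dropped.
 */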
static bool drop_pages(struct drm_i915_gem_object *obj,
		       unsigned long shrink, bool trylock_vm)
{
	unsigned long flags;

	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags |= I915_GEM_OBJECT_UNBIND_ACTIVE;
	if (!(shrink & I915_SHRINK_BOUND))
		flags |= I915_GEM_OBJECT_UNBIND_TEST;
	if (trylock_vm)
		flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK;

	if (i915_gem_object_unbind(obj, flags) == 0)
		return true;

	return false;
}

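/*
 * Ask the backend to shrink the object, translating the I915_SHRINK_* control
 * flags into I915_GEM_OBJECT_SHRINK_* flags. Objects without a shrink() hook
 * are left alone and report success.
 */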
static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
{
	if (obj->ops->shrink) {
		unsigned int shrink_flags = 0;

		if (!(flags & I915_SHRINK_ACTIVE))
			shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;

		if (flags & I915_SHRINK_WRITEBACK)
			shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK;

		return obj->ops->shrink(obj, shrink_flags);
	}

	return 0;
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @ww: i915 gem ww acquire ctx, or NULL
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink. This is e.g.
 * useful when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct i915_gem_ww_ctx *ww,
		struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned int shrink)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{
			&i915->mm.shrink_list,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
		},
		{ NULL, 0 },
	}, *phase;
	intel_wakeref_t wakeref = NULL;
	unsigned long count = 0;
	unsigned long scanned = 0;
	int err = 0, i = 0;
	struct intel_gt *gt;

	/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);

	trace_i915_gem_shrink(i915, target, shrink);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if (shrink & I915_SHRINK_BOUND) {
		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
		if (!wakeref)
			shrink &= ~I915_SHRINK_BOUND;
	}

	/*
	 * When shrinking the active list, we should also consider active
	 * contexts. Active contexts are pinned until they are retired, and
	 * so can not be simply unbound to retire and unpin their pages. To
	 * shrink the contexts, we must wait until the gpu is idle and
	 * completed its switch to the kernel context. In short, we do
	 * not have a good mechanism for idling a specific context, but
	 * what we can do is give them a kick so that we do not keep idle
	 * contexts around longer than is necessary.
	 */
	if (shrink & I915_SHRINK_ACTIVE) {
		for_each_gt(gt, i915, i)
			/* Retire requests to unpin all idle contexts */
			intel_gt_retire_requests(gt);
	}

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at the time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * i915->mm.obj_lock and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equals 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;
		unsigned long flags;

		if ((shrink & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the obj_lock. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (shrink & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(shrink & I915_SHRINK_ACTIVE) &&
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			/* May arrive from get_pages on another bo */
			if (!ww) {
				if (!i915_gem_object_trylock(obj, NULL))
					goto skip;
			} else {
				err = i915_gem_object_lock(obj, ww);
				if (err)
					goto skip;
			}

			if (drop_pages(obj, shrink, trylock_vm) &&
			    !__i915_gem_object_put_pages(obj) &&
			    !try_to_writeback(obj, shrink))
				count += obj->base.size >> PAGE_SHIFT;

			if (!ww)
				i915_gem_object_unlock(obj);

			scanned += obj->base.size >> PAGE_SHIFT;
skip:
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
			if (err)
				break;
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		if (err)
			break;
	}

	if (shrink & I915_SHRINK_BOUND)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	if (err)
		return err;

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}

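/*
 * Illustrative sketch only (not a caller that exists in this file): a path
 * that merely wants to trim up to 128 pages from unbound objects, without
 * waking the device or touching active objects, could call
 *
 *	i915_gem_shrink(NULL, i915, 128, NULL, I915_SHRINK_UNBOUND);
 *
 * while a caller inside a ww transaction would pass its ww context instead
 * of NULL so object locks are acquired through that context.
 */
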
/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests so that backing storage for active objects can be released as well.
 *
 * This should only be used in code to intentionally quiesce the gpu or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND);
	}

	return freed;
}

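/*
 * Report to vmscan how many pages we could potentially reclaim, using the
 * opportunity to retune the shrinker batch size.
 */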
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 = shrinker->private_data;
	unsigned long num_objects;
	unsigned long count;

	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
	num_objects = READ_ONCE(i915->mm.shrink_count);

	/*
	 * Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is roughly 2
	 * available GEM objects worth of pages. That is, we don't want
	 * the shrinker to fire until it is worth the cost of freeing an
	 * entire GEM object.
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		i915->mm.shrinker->batch =
			max((i915->mm.shrinker->batch + avg) >> 1,
			    128ul /* default SHRINK_BATCH */);
	}

	return count;
}

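/*
 * Called by vmscan to reclaim pages. Start with bound/unbound objects; if
 * kswapd still wants more, wake the device and retry including active
 * objects and writeback.
 */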
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 = shrinker->private_data;
	unsigned long freed;

	sc->nr_scanned = 0;

	freed = i915_gem_shrink(NULL, i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND);
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			freed += i915_gem_shrink(NULL, i915,
						 sc->nr_to_scan - sc->nr_scanned,
						 &sc->nr_scanned,
						 I915_SHRINK_ACTIVE |
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND |
						 I915_SHRINK_WRITEBACK);
		}
	}

	return sc->nr_scanned ? freed : SHRINK_STOP;
}

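/*
 * OOM notifier: forcibly wake the device and purge everything we can,
 * then report how much remains pinned versus still available.
 */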
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, available, freed_pages;
	intel_wakeref_t wakeref;
	unsigned long flags;

	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

	/*
	 * Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	available = unevictable = 0;
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			available += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (freed_pages || available)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned, %lu pages left available.\n",
			freed_pages, unevictable, available);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

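/*
 * vmap purge notifier: drop vmapped object pages and also unbind any idle
 * GGTT vmas with cached iomaps, since those wrap vmap space as well.
 */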
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;
	struct intel_gt *gt;
	int i;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_VMAPS);

	/* We also want to clear any cached iomaps as they wrap vmap */
	for_each_gt(gt, i915, i) {
		mutex_lock(&gt->ggtt->vm.mutex);
		list_for_each_entry_safe(vma, next,
					 &gt->ggtt->vm.bound_list, vm_link) {
			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
			struct drm_i915_gem_object *obj = vma->obj;

			if (!vma->iomap || i915_vma_is_active(vma))
				continue;

			if (!i915_gem_object_trylock(obj, NULL))
				continue;

			if (__i915_vma_unbind(vma) == 0)
				freed_pages += count;

			i915_gem_object_unlock(obj);
		}
		mutex_unlock(&gt->ggtt->vm.mutex);
	}

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

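/*
 * Register the shrinker with the VM along with the OOM and vmap-purge
 * notifiers so the driver gives memory back under pressure.
 */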
void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
	i915->mm.shrinker = shrinker_alloc(0, "drm-i915_gem");
	if (!i915->mm.shrinker) {
		drm_WARN_ON(&i915->drm, 1);
	} else {
		i915->mm.shrinker->scan_objects = i915_gem_shrinker_scan;
		i915->mm.shrinker->count_objects = i915_gem_shrinker_count;
		i915->mm.shrinker->batch = 4096;
		i915->mm.shrinker->private_data = i915;

		shrinker_register(i915->mm.shrinker);
	}

	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));

	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	drm_WARN_ON(&i915->drm,
		    register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

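/* Tear down in the reverse order of registration. */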
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
	drm_WARN_ON(&i915->drm,
		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
	drm_WARN_ON(&i915->drm,
		    unregister_oom_notifier(&i915->mm.oom_notifier));
	shrinker_free(i915->mm.shrinker);
}

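/*
 * Teach lockdep that @mutex may be taken within the fs_reclaim (shrinker)
 * context, so that allocating memory while holding it is flagged as a
 * potential deadlock.
 */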
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);
}

/**
 * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By
 * default all object types that support shrinking (see IS_SHRINKABLE) will
 * also make the object visible to the shrinker after allocating the system
 * memory pages.
 * @obj: The GEM object.
 *
 * This is typically used for special kernel internal objects that can't be
 * easily processed by the shrinker, like if they are perma-pinned.
 */
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	/*
	 * We can only be called while the pages are pinned or when
	 * the pages are released. If pinned, we should only be called
	 * from a single caller under controlled conditions; and on release
	 * only one caller may release us. Neither of the two may cross.
	 */
	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
	    !list_empty(&obj->mm.link)) {
		list_del_init(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

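/*
 * Drop a shrink_pin reference and, once the last pin is gone, add the object
 * to the given shrinker list under the obj_lock, updating the shrinkable
 * page accounting.
 */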
static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
					       struct list_head *head)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	if (!i915_gem_object_is_shrinkable(obj))
		return;

	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	GEM_BUG_ON(!kref_read(&obj->base.refcount));
	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
		GEM_BUG_ON(!list_empty(&obj->mm.link));

		list_add_tail(&obj->mm.link, head);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

/**
 * __i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list. Objects on this list might be swapped out. Used with
 * WILLNEED objects.
 * @obj: The GEM object.
 *
 * DO NOT USE. This is intended to be called on very special objects that don't
 * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
 * underneath.
 */
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	___i915_gem_object_make_shrinkable(obj,
					   &obj_to_i915(obj)->mm.shrink_list);
}

/**
 * __i915_gem_object_make_purgeable - Move the object to the tail of the
 * purgeable list. Objects on this list might be swapped out. Used with
 * DONTNEED objects.
 * @obj: The GEM object.
 *
 * DO NOT USE. This is intended to be called on very special objects that don't
 * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
 * underneath.
 */
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	___i915_gem_object_make_shrinkable(obj,
					   &obj_to_i915(obj)->mm.purge_list);
}

/**
 * i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list. Objects on this list might be swapped out. Used with
 * WILLNEED objects.
 * @obj: The GEM object.
 *
 * MUST only be called on objects which have backing pages.
 *
 * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
 */
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	__i915_gem_object_make_shrinkable(obj);
}

/**
 * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable
 * list. Used with DONTNEED objects. Unlike with shrinkable objects, the
 * shrinker will attempt to discard the backing pages, instead of trying to swap
 * them out.
 * @obj: The GEM object.
 *
 * MUST only be called on objects which have backing pages.
 *
 * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
 */
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	__i915_gem_object_make_purgeable(obj);
}