/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct kmem_cache *slab_cache;

struct active_node {
	struct rb_node node;
	struct i915_active_fence base;
	struct i915_active *ref;
	u64 timeline;
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

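/*
 * Idle barriers are tracked as proto-nodes: until claimed by a request,
 * active->fence holds ERR_PTR(-EAGAIN) and the embedded dma_fence_cb is
 * reused as an llist_node on engine->barrier_tasks, with cb.node.prev
 * doubling up as a pointer back to the owning engine.
 */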
static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

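/*
 * Final retirement, called once the last reference is dropped: prune the
 * rbtree down to the MRU cached node, run the retire callback, wake any
 * waiters and return the discarded nodes to the slab cache.
 */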
static void
__active_retire(struct i915_active *ref)
{
	struct rb_root root = RB_ROOT;
	struct active_node *it, *n;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	/* Even if we have not used the cache, we may still have a barrier */
	if (!ref->cache)
		ref->cache = fetch_node(ref->tree.rb_node);

	/* Keep the MRU cached node for reuse */
	if (ref->cache) {
		/* Discard all other nodes in the tree */
		rb_erase(&ref->cache->node, &ref->tree);
		root = ref->tree;

		/* Rebuild the tree with only the cached node */
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
		rb_insert_color(&ref->cache->node, &ref->tree);
		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);

		/* Make the cached node available for reuse with any timeline */
		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
	}

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	/* Finally free the discarded timeline tree */
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(slab_cache, it);
	}
}

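/*
 * Retirement can be triggered from a fence signaling callback, where the
 * retire hook must not sleep. If the tracker is marked
 * I915_ACTIVE_RETIRE_SLEEPS, the final release is punted to a worker on
 * system_unbound_wq instead of running inline.
 */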
static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return try_cmpxchg(__active_fence_slot(active), &fence, NULL);
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

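/*
 * Lockless lookup of the tracking node for @idx: try the MRU cache slot
 * first (claiming an unused slot, timeline 0, via cmpxchg64 if possible),
 * then walk the rbtree without taking tree_lock.
 */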
static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		/* Once claimed, this slot will only belong to this idx */
		if (cached == idx)
			return it;

		/*
		 * An unclaimed cache [.timeline=0] can only be claimed once.
		 *
		 * If the value is already non-zero, some other thread has
		 * claimed the cache and we know that it does not match our
		 * idx. If, and only if, the timeline is currently zero is it
		 * worth competing to claim it atomically for ourselves (for
		 * only the winner of that race will cmpxchg succeed).
		 */
		if (!cached && try_cmpxchg64(&it->timeline, &cached, idx))
			return it;
	}

	BUILD_BUG_ON(offsetof(typeof(*it), node));

	/* While active, the tree can only be built; not destroyed */
	GEM_BUG_ON(i915_active_is_idle(ref));

	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it);
			break;
		}
	}

	/* NB: If the tree rotated beneath us, we may miss our target. */
	return it;
}

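/*
 * Return the fence slot for @idx, falling back to allocating and inserting
 * a new node into the rbtree under tree_lock if none exists yet.
 */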
static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;

	node = __active_lookup(ref, idx);
	if (likely(node))
		return &node->base;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto out;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	/*
	 * XXX: We should preallocate this before i915_active_ref() is ever
	 * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
	 */
	node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
	if (!node)
		goto out;

	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	WRITE_ONCE(ref->cache, node);
	spin_unlock_irq(&ref->tree_lock);

	return &node->base;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	debug_active_init(ref);

	ref->flags = flags;
	ref->active = active;
	ref->retire = retire;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)

{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{
	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
		return false;

	/*
	 * This request is on the kernel_context timeline, and so
	 * we can use it to substitute for the pending idle-barrier
	 * request that we want to emit on the kernel_context.
	 */
	return __active_del_barrier(ref, node_from_active(active));
}

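/*
 * Record @rq as the most recent activity on its timeline within @ref; the
 * reference taken here is dropped when the request's fence signals.
 */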
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
	u64 idx = i915_request_timeline(rq)->fence_context;
	struct dma_fence *fence = &rq->fence;
	struct i915_active_fence *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	do {
		active = active_instance(ref, idx);
		if (!active) {
			err = -ENOMEM;
			goto out;
		}

		if (replace_barrier(ref, active)) {
			RCU_INIT_POINTER(active->fence, NULL);
			atomic_dec(&ref->count);
		}
	} while (unlikely(is_barrier(active)));

	fence = __i915_active_fence_set(active, fence);
	if (!fence)
		__i915_active_acquire(ref);
	else
		dma_fence_put(fence);

out:
	i915_active_release(ref);
	return err;
}

static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, fence);
		return NULL;
	}

	prev = __i915_active_fence_set(active, fence);
	if (!prev)
		__i915_active_acquire(ref);

	return prev;
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, &ref->excl, f);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

static void __i915_active_activate(struct i915_active *ref)
{
	spin_lock_irq(&ref->tree_lock); /* __active_retire() */
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
}

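/*
 * Take a reference on @ref, running the optional ref->active() callback
 * under ref->mutex on the idle -> busy transition.
 */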
int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	if (!ref->active) {
		__i915_active_activate(ref);
		return 0;
	}

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		err = ref->active(ref);
		if (!err)
			__i915_active_activate(ref);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

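/*
 * Wait in @state until all activity tracked by @ref has been retired,
 * first flushing lazy signaling so the tracked fences make progress.
 */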
int __i915_active_wait(struct i915_active *ref, int state)
{
	might_sleep();

	/* Any fence added after the wait begins will not be auto-signaled */
	if (i915_active_acquire_if_busy(ref)) {
		int err;

		err = flush_lazy_signals(ref);
		i915_active_release(ref);
		if (err)
			return err;

		if (___wait_var_event(ref, i915_active_is_idle(ref),
				      state, 0, 0, schedule()))
			return -EINTR;
	}

	/*
	 * After the wait is complete, the caller may free the active.
	 * We have to flush any concurrent retirement before returning.
	 */
	flush_work(&ref->work);
	return 0;
}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	mutex_destroy(&ref->mutex);

	if (ref->cache)
		kmem_cache_free(slab_cache, ref->cache);
}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

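/*
 * Look for an already allocated node on @idx (the kernel_context timeline)
 * that can be recycled as a barrier, preferring completely idle nodes but
 * stealing a pending barrier from the engine if it can be claimed.
 */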
static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	return NULL;

match:
	spin_lock_irq(&ref->tree_lock);
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		WRITE_ONCE(ref->cache, NULL);
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

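/*
 * Reserve (or reuse) one idle-barrier node per physical engine behind
 * @engine before any barriers are attached; may block while a previous
 * preallocation is still being consumed.
 */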
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		rcu_read_lock();
		node = reuse_idle_barrier(ref, idx);
		rcu_read_unlock();
		if (!node) {
			node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
			if (!node)
				goto unwind;

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			__i915_active_acquire(ref);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(slab_cache, node);
	}
	return -ENOMEM;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 2);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

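/*
 * Claim all pending barrier tasks for @rq's engine and hook them up as
 * callbacks on @rq's fence, so each parent i915_active is released when
 * this kernel-context request is retired.
 */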
void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Gets and returns a reference to the previous fence
 * (if not already completed), which the caller must put after making sure
 * that it is executed before the new fence. To ensure that the order of
 * fences within the timeline of the i915_active_fence is understood, it
 * should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	/*
	 * In case of fences embedded in i915_requests, their memory is
	 * SLAB_TYPESAFE_BY_RCU, then it can be reused right after release
	 * by new requests.  Then, there is a risk of passing back a pointer
	 * to a new, completely unrelated fence that reuses the same memory
	 * while tracked under a different active tracker.  Combined with i915
	 * perf open/close operations that build await dependencies between
	 * engine kernel context requests and user requests from different
	 * timelines, this can lead to dependency loops and infinite waits.
	 *
	 * As a countermeasure, we try to get a reference to the active->fence
	 * first, so if we succeed and pass it back to our user then it is not
	 * released and potentially reused by an unrelated request before the
	 * user has a chance to set up an await dependency on it.
	 */
	prev = i915_active_fence_get(active);
	if (fence == prev)
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * Both A and B have got a reference to C or NULL, depending on the
	 * timing of the interrupt handler.  Let's assume that if A has got C
	 * then it has locked C first (before B).
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	if (prev)
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);

	/*
	 * A does the cmpxchg first, and so it sees C or NULL, as before, or
	 * something else, depending on the timing of other threads and/or
	 * interrupt handler.  If not the same as before then A unlocks C if
	 * applicable and retries, starting from an attempt to get a new
	 * active->fence.  Meanwhile, B follows the same path as A.
	 * Once A succeeds with cmpxchg, B fails again, retries, gets A from
	 * active->fence, locks it as soon as A completes, and possibly
	 * succeeds with cmpxchg.
	 */
	while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
		if (prev) {
			spin_unlock(prev->lock);
			dma_fence_put(prev);
		}
		spin_unlock_irqrestore(fence->lock, flags);

		prev = i915_active_fence_get(active);
		GEM_BUG_ON(prev == fence);

		spin_lock_irqsave(fence->lock, flags);
		if (prev)
			spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
	}

	/*
	 * If prev is NULL then the previous fence must have been signaled
	 * and we know that we are first on the timeline.  If it is still
	 * present then, having the lock on that fence already acquired, we
	 * serialise with the interrupt handler, in the process of removing it
	 * from any future interrupt callback.  A will then wait on C before
	 * executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 */
	if (prev) {
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

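/*
 * A self-managed i915_active: the tracker is allocated together with a
 * kref; the active() callback takes an extra kref on each idle -> busy
 * transition and retire() drops it, so the allocation survives until it
 * is both idle and unreferenced.
 */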
struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_get(&aa->ref);
	return &aa->base;
}

static void auto_release(struct kref *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), ref);

	i915_active_fini(&aa->base);
	kfree(aa);
}

void i915_active_put(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_put(&aa->ref, auto_release);
}

static int auto_active(struct i915_active *ref)
{
	i915_active_get(ref);
	return 0;
}

static void auto_retire(struct i915_active *ref)
{
	i915_active_put(ref);
}

struct i915_active *i915_active_create(void)
{
	struct auto_active *aa;

	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
	if (!aa)
		return NULL;

	kref_init(&aa->ref);
	i915_active_init(&aa->base, auto_active, auto_retire, 0);

	return &aa->base;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

void i915_active_module_exit(void)
{
	kmem_cache_destroy(slab_cache);
}

int __init i915_active_module_init(void)
{
	slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!slab_cache)
		return -ENOMEM;

	return 0;
}