/*
 * SPDX-License-Identifier: MIT
 *
 * (C) Copyright 2016 Intel Corporation
 */

#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/dma-resv.h>

#include "i915_sw_fence.h"
#include "i915_selftest.h"

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
#else
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
static DEFINE_SPINLOCK(i915_sw_fence_lock);
#endif

#define WQ_FLAG_BITS \
	BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags))

/* after WQ_FLAG_* for safety */
#define I915_SW_FENCE_FLAG_FENCE BIT(WQ_FLAG_BITS - 1)
#define I915_SW_FENCE_FLAG_ALLOC BIT(WQ_FLAG_BITS - 2)
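
/*
 * Both flags live in the topmost bits of wait_queue_entry.flags, above the
 * core WQ_FLAG_* bits: FLAG_FENCE marks a wait entry whose wakeup chains
 * into another i915_sw_fence, while FLAG_ALLOC marks an entry kmalloc'ed
 * below that must be freed again by i915_sw_fence_wake().
 */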

enum {
	DEBUG_FENCE_IDLE = 0,
	DEBUG_FENCE_NOTIFY,
};

static void *i915_sw_fence_debug_hint(void *addr)
{
	return (void *)(((struct i915_sw_fence *)addr)->fn);
}

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS

static const struct debug_obj_descr i915_sw_fence_debug_descr = {
	.name = "i915_sw_fence",
	.debug_hint = i915_sw_fence_debug_hint,
};

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
	debug_object_init(fence, &i915_sw_fence_debug_descr);
}

static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
	debug_object_activate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
	debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new);
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
	debug_object_deactivate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}

static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
{
	debug_object_free(fence, &i915_sw_fence_debug_descr);
	smp_wmb(); /* flush the change in state before reallocation */
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
	debug_object_assert_init(fence, &i915_sw_fence_debug_descr);
}

#else

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}

static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
}

static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
}

#endif

static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
				  enum i915_sw_fence_notify state)
{
	return fence->fn(fence, state);
}

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
void i915_sw_fence_fini(struct i915_sw_fence *fence)
{
	debug_fence_free(fence);
}
#endif

static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
					struct list_head *continuation)
{
	wait_queue_head_t *x = &fence->wait;
	wait_queue_entry_t *pos, *next;
	unsigned long flags;

	debug_fence_deactivate(fence);
	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */

	/*
	 * To prevent unbounded recursion as we traverse the graph of
	 * i915_sw_fences, we move the entry list from this, the next ready
	 * fence, to the tail of the original fence's entry list
	 * (and so added to the list to be woken).
	 */

	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
	if (continuation) {
		list_for_each_entry_safe(pos, next, &x->head, entry) {
			if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
				list_move_tail(&pos->entry, continuation);
			else
				pos->func(pos, TASK_NORMAL, 0, continuation);
		}
	} else {
		LIST_HEAD(extra);

		do {
			list_for_each_entry_safe(pos, next, &x->head, entry) {
				int wake_flags;

				wake_flags = 0;
				if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
					wake_flags = fence->error;

				pos->func(pos, TASK_NORMAL, wake_flags, &extra);
			}

			if (list_empty(&extra))
				break;

			list_splice_tail_init(&extra, &x->head);
		} while (1);
	}
	spin_unlock_irqrestore(&x->lock, flags);

	debug_fence_assert(fence);
}

static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
				     struct list_head *continuation)
{
	debug_fence_assert(fence);

	if (!atomic_dec_and_test(&fence->pending))
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY);

	if (__i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE);

	__i915_sw_fence_wake_up_all(fence, continuation);

	debug_fence_destroy(fence);
	__i915_sw_fence_notify(fence, FENCE_FREE);
}
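
/*
 * Once the last pending count is dropped, the notify callback is sent
 * FENCE_COMPLETE; if it returns anything other than NOTIFY_DONE the fence
 * is left alone (the callback has taken ownership), otherwise all waiters
 * are woken and a final FENCE_FREE notification is delivered.
 */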

void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);

	if (WARN_ON(i915_sw_fence_done(fence)))
		return;

	__i915_sw_fence_complete(fence, NULL);
}

bool i915_sw_fence_await(struct i915_sw_fence *fence)
{
	int pending;

	/*
	 * It is only safe to add a new await to the fence while it has
	 * not yet been signaled (i.e. there are still existing signalers).
	 */
	pending = atomic_read(&fence->pending);
	do {
		if (pending < 1)
			return false;
	} while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1));

	return true;
}
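
/*
 * fence->pending acts as a count of outstanding signalers: it starts at 1
 * (dropped by i915_sw_fence_commit()), each successful i915_sw_fence_await()
 * adds one, each i915_sw_fence_complete() drops one, and the transition
 * 0 -> -1 in __i915_sw_fence_wake_up_all() marks the fence as done.
 */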

void __i915_sw_fence_init(struct i915_sw_fence *fence,
			  i915_sw_fence_notify_t fn,
			  const char *name,
			  struct lock_class_key *key)
{
	__init_waitqueue_head(&fence->wait, name, key);
	fence->fn = fn;
#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
	fence->flags = 0;
#endif

	i915_sw_fence_reinit(fence);
}

void i915_sw_fence_reinit(struct i915_sw_fence *fence)
{
	debug_fence_init(fence);

	atomic_set(&fence->pending, 1);
	fence->error = 0;

	I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}

void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
	debug_fence_activate(fence);
	i915_sw_fence_complete(fence);
}
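
/*
 * Illustrative lifecycle sketch (not taken from this file; "my_notify" is a
 * hypothetical caller-supplied i915_sw_fence_notify_t):
 *
 *	i915_sw_fence_init(&fence, my_notify);
 *	i915_sw_fence_await_dma_fence(&fence, dma, 0, GFP_KERNEL);
 *	i915_sw_fence_commit(&fence);
 *
 * commit() drops the initial pending count, so the fence signals as soon as
 * every await added before it has completed.
 */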

static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
	i915_sw_fence_set_error_once(wq->private, flags);

	list_del(&wq->entry);
	__i915_sw_fence_complete(wq->private, key);

	if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
		kfree(wq);
	return 0;
}
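
/*
 * The wake callback repurposes the waitqueue arguments: "flags" carries the
 * signaler's error code (folded in via i915_sw_fence_set_error_once()) and
 * "key" is the continuation list that __i915_sw_fence_wake_up_all() uses to
 * flatten recursive wakeups into iteration.
 */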

#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					   const struct i915_sw_fence * const signaler)
{
	wait_queue_entry_t *wq;

	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return false;

	if (fence == signaler)
		return true;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		if (__i915_sw_fence_check_if_after(wq->private, signaler))
			return true;
	}

	return false;
}

static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
{
	wait_queue_entry_t *wq;

	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		__i915_sw_fence_clear_checked_bit(wq->private);
	}
}

static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					 const struct i915_sw_fence * const signaler)
{
	unsigned long flags;
	bool err;

	spin_lock_irqsave(&i915_sw_fence_lock, flags);
	err = __i915_sw_fence_check_if_after(fence, signaler);
	__i915_sw_fence_clear_checked_bit(fence);
	spin_unlock_irqrestore(&i915_sw_fence_lock, flags);

	return err;
}
#else
static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					 const struct i915_sw_fence * const signaler)
{
	return false;
}
#endif
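
/*
 * The DAG check rejects cycles such as fence A awaiting B while B already
 * awaits A: walking the wait list of "fence" must never reach "signaler",
 * otherwise neither could ever signal. The CHECKED bit merely marks nodes
 * already visited during the walk and is cleared again afterwards.
 */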

static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
					  struct i915_sw_fence *signaler,
					  wait_queue_entry_t *wq, gfp_t gfp)
{
	unsigned int pending;
	unsigned long flags;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (i915_sw_fence_done(signaler)) {
		i915_sw_fence_set_error_once(fence, signaler->error);
		return 0;
	}

	debug_fence_assert(signaler);

	/* The dependency graph must be acyclic. */
	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
		return -EINVAL;

	pending = I915_SW_FENCE_FLAG_FENCE;
	if (!wq) {
		wq = kmalloc(sizeof(*wq), gfp);
		if (!wq) {
			if (!gfpflags_allow_blocking(gfp))
				return -ENOMEM;

			i915_sw_fence_wait(signaler);
			i915_sw_fence_set_error_once(fence, signaler->error);
			return 0;
		}

		pending |= I915_SW_FENCE_FLAG_ALLOC;
	}

	INIT_LIST_HEAD(&wq->entry);
	wq->flags = pending;
	wq->func = i915_sw_fence_wake;
	wq->private = fence;

	i915_sw_fence_await(fence);

	spin_lock_irqsave(&signaler->wait.lock, flags);
	if (likely(!i915_sw_fence_done(signaler))) {
		__add_wait_queue_entry_tail(&signaler->wait, wq);
		pending = 1;
	} else {
		i915_sw_fence_wake(wq, 0, signaler->error, NULL);
		pending = 0;
	}
	spin_unlock_irqrestore(&signaler->wait.lock, flags);

	return pending;
}
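
/*
 * Return convention for the await helpers below: 1 means a wait was queued
 * on the signaler, 0 means the signaler had already completed (its error,
 * if any, is folded into the fence), -EINVAL means a dependency cycle was
 * detected, and -ENOMEM means the wait entry could not be allocated and the
 * gfp flags did not allow falling back to a synchronous wait.
 */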

int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
				 struct i915_sw_fence *signaler,
				 wait_queue_entry_t *wq)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
}

int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
				     struct i915_sw_fence *signaler,
				     gfp_t gfp)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}
struct i915_sw_dma_fence_cb_timer {
	struct i915_sw_dma_fence_cb base;
	struct dma_fence *dma;
	struct timer_list timer;
	struct irq_work work;
	struct rcu_head rcu;
};
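
/*
 * When a timeout is requested, this larger callback is used: the timer fires
 * after "timeout" jiffies, reports the stuck dma_fence and completes the
 * i915_sw_fence with -ETIMEDOUT. Whichever of the timer and the dma_fence
 * callback runs first claims cb->base.fence via xchg(); the dma_fence
 * callback then queues an irq_work to shut the timer down and drop the
 * extra dma_fence reference.
 */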

static void dma_i915_sw_fence_wake(struct dma_fence *dma,
				   struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_set_error_once(cb->fence, dma->error);
	i915_sw_fence_complete(cb->fence);
	kfree(cb);
}

static void timer_i915_sw_fence_wake(struct timer_list *t)
{
	struct i915_sw_dma_fence_cb_timer *cb = timer_container_of(cb, t,
								   timer);
	struct i915_sw_fence *fence;
	const char __rcu *timeline;
	const char __rcu *driver;

	fence = xchg(&cb->base.fence, NULL);
	if (!fence)
		return;

	rcu_read_lock();
	driver = dma_fence_driver_name(cb->dma);
	timeline = dma_fence_timeline_name(cb->dma);
	pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
		  rcu_dereference(driver),
		  rcu_dereference(timeline),
		  cb->dma->seqno,
		  i915_sw_fence_debug_hint(fence));
	rcu_read_unlock();

	i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
	i915_sw_fence_complete(fence);
}

static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
					 struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(data, typeof(*cb), base.base);
	struct i915_sw_fence *fence;

	fence = xchg(&cb->base.fence, NULL);
	if (fence) {
		i915_sw_fence_set_error_once(fence, dma->error);
		i915_sw_fence_complete(fence);
	}

	irq_work_queue(&cb->work);
}

static void irq_i915_sw_fence_work(struct irq_work *wrk)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(wrk, typeof(*cb), work);

	timer_shutdown_sync(&cb->timer);
	dma_fence_put(cb->dma);

	kfree_rcu(cb, rcu);
}

int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				  struct dma_fence *dma,
				  unsigned long timeout,
				  gfp_t gfp)
{
	struct i915_sw_dma_fence_cb *cb;
	dma_fence_func_t func;
	int ret;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb = kmalloc(timeout ?
		     sizeof(struct i915_sw_dma_fence_cb_timer) :
		     sizeof(struct i915_sw_dma_fence_cb),
		     gfp);
	if (!cb) {
		if (!gfpflags_allow_blocking(gfp))
			return -ENOMEM;

		ret = dma_fence_wait(dma, false);
		if (ret)
			return ret;

		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	func = dma_i915_sw_fence_wake;
	if (timeout) {
		struct i915_sw_dma_fence_cb_timer *timer =
			container_of(cb, typeof(*timer), base);

		timer->dma = dma_fence_get(dma);
		init_irq_work(&timer->work, irq_i915_sw_fence_work);

		timer_setup(&timer->timer,
			    timer_i915_sw_fence_wake, TIMER_IRQSAFE);
		mod_timer(&timer->timer, round_jiffies_up(jiffies + timeout));

		func = dma_i915_sw_fence_wake_timer;
	}

	ret = dma_fence_add_callback(dma, &cb->base, func);
	if (ret == 0) {
		ret = 1;
	} else {
		func(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}
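
/*
 * As with the sw_fence await above: 0 means the dma_fence had already
 * signaled, 1 means a callback (and, for a non-zero timeout, a watchdog
 * timer) is now pending, and a negative value is an error from the atomic
 * allocation or from the blocking dma_fence_wait() fallback.
 */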

static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
				     struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_set_error_once(cb->fence, dma->error);
	i915_sw_fence_complete(cb->fence);
}

int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				    struct dma_fence *dma,
				    struct i915_sw_dma_fence_cb *cb)
{
	int ret;

	debug_fence_assert(fence);

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	ret = 1;
	if (dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake)) {
		/* fence already signaled */
		__dma_i915_sw_fence_wake(dma, &cb->base);
		ret = 0;
	}

	return ret;
}

int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
				    struct dma_resv *resv,
				    bool write,
				    unsigned long timeout,
				    gfp_t gfp)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f;
	int ret = 0, pending;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(write));
	dma_resv_for_each_fence_unlocked(&cursor, f) {
		pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
							gfp);
		if (pending < 0) {
			ret = pending;
			break;
		}

		ret |= pending;
	}
	dma_resv_iter_end(&cursor);
	return ret;
}
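
/*
 * Illustrative sketch of awaiting an object's reservation before use (not
 * taken from this driver; "obj" and "err_out" are hypothetical):
 *
 *	err = i915_sw_fence_await_reservation(&fence, obj->base.resv,
 *					      true, 0, GFP_KERNEL);
 *	if (err < 0)
 *		goto err_out;
 *	if (err > 0)
 *		; // at least one wait was queued; commit and let it signal
 *
 * The OR-accumulated return preserves the "did we actually wait" information
 * while still reporting the first error encountered.
 */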

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/lib_sw_fence.c"
#include "selftests/i915_sw_fence.c"
#endif