// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);

static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;

/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);

/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(); a usage sketch follows the list below. Fences are
 * associated with a context, allocated through dma_fence_context_alloc(),
 * and all fences on the same context are fully ordered.
 *
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This is
 *   called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 *   fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points are
 *   implicitly passed around as part of shared &dma_buf instances. Such
 *   implicit fences are stored in &struct dma_resv through the
 *   &dma_buf.resv pointer.
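 *
 * As a minimal sketch (hypothetical driver code: my_fence_ops, my_lock,
 * my_context and my_seqno are stand-ins, not existing kernel symbols),
 * the producer side of the lifecycle looks like::
 *
 *     dma_fence_init(fence, &my_fence_ops, &my_lock, my_context, ++my_seqno);
 *     // publish the fence and kick off the hardware job ...
 *     dma_fence_signal(fence);    // e.g. from the completion interrupt
 *
 * while a consumer that holds a reference simply waits::
 *
 *     ret = dma_fence_wait(fence, true);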
 */

/**
 * DOC: fence cross-driver contract
 *
 * Since &dma_fence provides a cross-driver contract, all drivers must follow
 * the same rules:
 *
 * * Fences must complete in a reasonable time. Fences which represent kernels
 *   and shaders submitted by userspace, which could run forever, must be backed
 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
 *   further command submission and force complete all in-flight fences, e.g.
 *   when the driver or hardware do not support gpu reset, or if the gpu reset
 *   failed for some reason. Ideally the driver supports gpu recovery which only
 *   affects the offending userspace context, and no other userspace
 *   submissions.
 *
 * * Drivers may have different ideas of what completion within a reasonable
 *   time means. Some hang recovery code uses a fixed timeout, others a mix
 *   between observing forward progress and increasingly strict timeouts.
 *   Drivers should not try to second-guess timeout handling of fences from
 *   other drivers.
 *
 * * To ensure there are no deadlocks of dma_fence_wait() against other locks,
 *   drivers should annotate all code required to reach dma_fence_signal(),
 *   which completes the fences, with dma_fence_begin_signalling() and
 *   dma_fence_end_signalling().
 *
 * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
 *   This means any code required for fence completion cannot acquire a
 *   &dma_resv lock. Note that this also pulls in the entire established
 *   locking hierarchy around dma_resv_lock() and dma_resv_unlock().
 *
 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
 *   callbacks. This means any code required for fence completion cannot
 *   allocate memory with GFP_KERNEL.
 *
 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
 *   respectively &mmu_interval_notifier callbacks. This means any code required
 *   for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
 *   Only GFP_ATOMIC is permissible, which might fail (see the sketch below).
 *
 * Note that only GPU drivers have a reasonable excuse for both requiring
 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
 * track asynchronous compute work using &dma_fence. No driver outside of
 * drivers/gpu should ever call dma_fence_wait() in such contexts.
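 *
 * As an illustration of the allocation rules above (a sketch only;
 * my_job_complete() and queue_optional_event() are hypothetical driver
 * code), anything on the path to dma_fence_signal() must tolerate
 * allocation failure instead of blocking on the allocator::
 *
 *     // inside my_job_complete(), on the signalling path
 *     event = kzalloc(sizeof(*event), GFP_ATOMIC);
 *     if (event)
 *         queue_optional_event(event);    // optional work only
 *     dma_fence_signal(&job->fence);      // must happen regardless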
 */

static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};

/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled. The fence's
 * timestamp corresponds to the first time after boot this
 * function is called.
 */
struct dma_fence *dma_fence_get_stub(void)
{
	spin_lock(&dma_fence_stub_lock);
	if (!dma_fence_stub.ops) {
		dma_fence_init(&dma_fence_stub,
			       &dma_fence_stub_ops,
			       &dma_fence_stub_lock,
			       0, 0);

		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			&dma_fence_stub.flags);

		dma_fence_signal_locked(&dma_fence_stub);
	}
	spin_unlock(&dma_fence_stub_lock);

	return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);

/**
 * dma_fence_allocate_private_stub - return a private, signaled fence
 * @timestamp: timestamp when the fence was signaled
 *
 * Return a newly allocated and signaled stub fence.
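 *
 * For example, to get a stub stamped with the current time::
 *
 *     fence = dma_fence_allocate_private_stub(ktime_get());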
 */
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
{
	struct dma_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	dma_fence_init(fence,
		       &dma_fence_stub_ops,
		       &dma_fence_stub_lock,
		       0, 0);

	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
		&fence->flags);

	dma_fence_signal_timestamp(fence, timestamp);

	return fence;
}
EXPORT_SYMBOL(dma_fence_allocate_private_stub);

/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated.  The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
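 *
 * A minimal sketch (the timeline struct, my_fence_ops and the embedded
 * lock are hypothetical driver code)::
 *
 *     timeline->context = dma_fence_context_alloc(1);
 *     ...
 *     dma_fence_init(fence, &my_fence_ops, &timeline->lock,
 *                    timeline->context, ++timeline->seqno);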
 */
u64 dma_fence_context_alloc(unsigned num)
{
	WARN_ON(!num);
	return atomic64_fetch_add(num, &dma_fence_context_counter);
}
EXPORT_SYMBOL(dma_fence_context_alloc);

/**
 * DOC: fence signalling annotation
 *
 * Proving correctness of all the kernel code around &dma_fence through code
 * review and testing is tricky for a few reasons:
 *
 * * It is a cross-driver contract, and therefore all drivers must follow the
 *   same rules for lock nesting order, calling contexts for various functions
 *   and anything else significant for in-kernel interfaces. But it is also
 *   impossible to test all drivers in a single machine, hence brute-force N vs.
 *   N testing of all combinations is impossible. Even just limiting to the
 *   possible combinations is infeasible.
 *
 * * There is an enormous amount of driver code involved. For render drivers
 *   there's the tail of command submission, after fences are published,
 *   scheduler code, interrupt and workers to process job completion,
 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
 *   with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
 *   and &shrinker. For modesetting drivers there's the commit tail functions
 *   between when fences for an atomic modeset are published, and when the
 *   corresponding vblank completes, including any interrupt processing and
 *   related workers. Auditing all that code, across all drivers, is not
 *   feasible.
 *
 * * Due to how many other subsystems are involved and the locking hierarchies
 *   this pulls in there is extremely thin wiggle-room for driver-specific
 *   differences. &dma_fence interacts with almost all of the core memory
 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
 *   dma_resv_unlock(). On the other side it also interacts through all
 *   allocation sites through &mmu_notifier and &shrinker.
 *
 * Furthermore lockdep does not handle cross-release dependencies, which means
 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
 * at runtime with some quick testing. The simplest example is one thread
 * waiting on a &dma_fence while holding a lock::
 *
 *     lock(A);
 *     dma_fence_wait(B);
 *     unlock(A);
 *
 * while the other thread is stuck trying to acquire the same lock, which
 * prevents it from signalling the fence the previous thread is stuck waiting
 * on::
 *
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *
 * By manually annotating all code relevant to signalling a &dma_fence we can
 * teach lockdep about these dependencies, which also helps with the validation
 * headache since now lockdep can check all the rules for us::
 *
 *    cookie = dma_fence_begin_signalling();
 *    lock(A);
 *    unlock(A);
 *    dma_fence_signal(B);
 *    dma_fence_end_signalling(cookie);
 *
 * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
 * annotate critical sections the following rules need to be observed:
 *
 * * All code necessary to complete a &dma_fence must be annotated, from the
 *   point where a fence is accessible to other threads, to the point where
 *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
 *   and due to the very strict rules and many corner cases it is infeasible to
 *   catch these just with review or normal stress testing.
 *
 * * &struct dma_resv deserves a special note, since the readers are only
 *   protected by rcu. This means the signalling critical section starts as soon
 *   as the new fences are installed, even before dma_resv_unlock() is called.
 *
 * * The only exception are fast paths and opportunistic signalling code, which
 *   calls dma_fence_signal() purely as an optimization, but is not required to
 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
 *   which calls dma_fence_signal(), while the mandatory completion path goes
 *   through a hardware interrupt and possible job completion worker.
 *
 * * To aid composability of code, the annotations can be freely nested (see
 *   the sketch below), as long as the overall locking hierarchy is consistent.
 *   The annotations also work both in interrupt and process context. Due to
 *   implementation details this requires that callers pass an opaque cookie
 *   from dma_fence_begin_signalling() to dma_fence_end_signalling().
 *
 * * Validation against the cross driver contract is implemented by priming
 *   lockdep with the relevant hierarchy at boot-up. This means even just
 *   testing with a single device is enough to validate a driver, at least as
 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
 *   concerned.
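 *
 * Because the annotations nest, a helper that may run inside an already
 * annotated section can unconditionally open its own (sketch)::
 *
 *     cookie = dma_fence_begin_signalling();
 *     ...
 *     inner = dma_fence_begin_signalling();   // nests freely
 *     ...
 *     dma_fence_end_signalling(inner);
 *     ...
 *     dma_fence_end_signalling(cookie);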
 */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map dma_fence_lockdep_map = {
	.name = "dma_fence_map"
};

/**
 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 *
 * Drivers should use this to annotate the beginning of any code section
 * required to eventually complete &dma_fence by calling dma_fence_signal().
 *
 * The end of these critical sections is annotated with
 * dma_fence_end_signalling().
 *
 * Returns:
 *
 * Opaque cookie needed by the implementation, which needs to be passed to
 * dma_fence_end_signalling().
 */
bool dma_fence_begin_signalling(void)
{
	/* explicitly nesting ... */
	if (lock_is_held_type(&dma_fence_lockdep_map, 1))
		return true;

	/* rely on might_sleep check for soft/hardirq locks */
	if (in_atomic())
		return true;

	/* ... and non-recursive successful read_trylock */
	lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _RET_IP_);

	return false;
}
EXPORT_SYMBOL(dma_fence_begin_signalling);

/**
 * dma_fence_end_signalling - end a critical DMA fence signalling section
 * @cookie: opaque cookie from dma_fence_begin_signalling()
 *
 * Closes a critical section annotation opened by dma_fence_begin_signalling().
 */
void dma_fence_end_signalling(bool cookie)
{
	if (cookie)
		return;

	lock_release(&dma_fence_lockdep_map, _RET_IP_);
}
EXPORT_SYMBOL(dma_fence_end_signalling);

void __dma_fence_might_wait(void)
{
	bool tmp;

	tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
	if (tmp)
		lock_release(&dma_fence_lockdep_map, _THIS_IP_);
	lock_map_acquire(&dma_fence_lockdep_map);
	lock_map_release(&dma_fence_lockdep_map);
	if (tmp)
		lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _THIS_IP_);
}
#endif

/**
 * dma_fence_signal_timestamp_locked - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Unlike dma_fence_signal_timestamp(), this function must be called with
 * &dma_fence.lock held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				      ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	lockdep_assert_held(fence->lock);

	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &fence->flags)))
		return -EINVAL;

	/* Stash the cb_list before replacing it with the timestamp */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);

/**
 * dma_fence_signal_timestamp - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
{
	unsigned long flags;
	int ret;

	if (WARN_ON(!fence))
		return -EINVAL;

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, timestamp);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);

/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	return dma_fence_signal_timestamp_locked(fence, ktime_get());
}
EXPORT_SYMBOL(dma_fence_signal_locked);

/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;
	int ret;
	bool tmp;

	if (WARN_ON(!fence))
		return -EINVAL;

	tmp = dma_fence_begin_signalling();

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_end_signalling(tmp);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal);

/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
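 *
 * A typical call with a bounded wait looks like (sketch; the 100ms
 * budget is arbitrary)::
 *
 *     ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *     if (ret == 0)
 *         ...    // timed out
 *     else if (ret < 0)
 *         ...    // -ERESTARTSYS or a driver-specific error
 *     else
 *         ...    // signaled, ret jiffies of budget remained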
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	might_sleep();

	__dma_fence_might_wait();

	dma_fence_enable_sw_signaling(fence);

	if (trace_dma_fence_wait_start_enabled()) {
		rcu_read_lock();
		trace_dma_fence_wait_start(fence);
		rcu_read_unlock();
	}
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	if (trace_dma_fence_wait_end_enabled()) {
		rcu_read_lock();
		trace_dma_fence_wait_end(fence);
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	rcu_read_lock();
	trace_dma_fence_destroy(fence);

	if (!list_empty(&fence->cb_list) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		const char __rcu *timeline;
		const char __rcu *driver;
		unsigned long flags;

		driver = dma_fence_driver_name(fence);
		timeline = dma_fence_timeline_name(fence);

		WARN(1,
		     "Fence %s:%s:%llx:%llx released with pending signals!\n",
		     rcu_dereference(driver), rcu_dereference(timeline),
		     fence->context, fence->seqno);

		/*
		 * Failed to signal before release, likely a refcounting issue.
		 *
		 * This should never happen, but if it does make sure that we
		 * don't leave chains dangling. We set the error flag first
		 * so that the callbacks know this signal is due to an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	rcu_read_unlock();

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);

static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
	bool was_set;

	lockdep_assert_held(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			return false;
		}
	}

	return true;
}

/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	__dma_fence_enable_signaling(fence);
	spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);

/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * Add a software callback to the fence. The caller should keep a reference to
 * the fence.
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * If fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Note that the callback can be called from an atomic context or irq context.
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
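 *
 * A sketch of typical usage (my_cb_func() and the embedding context
 * struct are hypothetical)::
 *
 *     ret = dma_fence_add_callback(fence, &ctx->cb, my_cb_func);
 *     if (ret == -ENOENT)
 *         my_cb_func(fence, &ctx->cb);    // already signaled, run inline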
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	if (__dma_fence_enable_signaling(fence)) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		INIT_LIST_HEAD(&cb->node);
		ret = -ENOENT;
	}

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);

/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed in err.
 */
int dma_fence_get_status(struct dma_fence *fence)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(fence->lock, flags);
	status = dma_fence_get_status_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return status;
}
EXPORT_SYMBOL(dma_fence_get_status);

/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence using
 * dma_fence_add_callback() beforehand.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);

struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero, the value one
 * is returned if the fence is already signaled, for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;

	spin_lock_irqsave(fence->lock, flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	if (!timeout) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}

/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 *	positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
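 *
 * A sketch of waiting on an array with a bounded budget::
 *
 *     ret = dma_fence_wait_any_timeout(fences, count, true,
 *                                      msecs_to_jiffies(100), &idx);
 *     if (ret > 0)
 *         ...    // fences[idx] is the first that signaled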
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);

/**
 * DOC: deadline hints
 *
 * In an ideal world, it would be possible to pipeline a workload sufficiently
 * that a utilization based device frequency governor could arrive at a minimum
 * frequency that meets the requirements of the use-case, in order to minimize
 * power consumption.  But in the real world there are many workloads which
 * defy this ideal.  For example, but not limited to:
 *
 * * Workloads that ping-pong between device and CPU, with alternating periods
 *   of CPU waiting for device, and device waiting on CPU.  This can result in
 *   devfreq and cpufreq seeing idle time in their respective domains and in
 *   result reduce frequency.
 *
 * * Workloads that interact with a periodic time based deadline, such as double
 *   buffered GPU rendering vs vblank sync'd page flipping.  In this scenario,
 *   missing a vblank deadline results in an *increase* in idle time on the GPU
 *   (since it has to wait an additional vblank period), sending a signal to
 *   the GPU's devfreq to reduce frequency, when in fact the opposite is what is
 *   needed.
 *
 * To this end, deadline hint(s) can be set on a &dma_fence via
 * dma_fence_set_deadline() (or indirectly via userspace facing ioctls like
 * &sync_set_deadline).  The deadline hint provides a way for the waiting
 * driver, or userspace, to convey an appropriate sense of urgency to the
 * signaling driver.
 *
 * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace
 * facing APIs).  The time could either be some point in the future (such as
 * the vblank based deadline for page-flipping, or the start of a compositor's
 * composition cycle), or the current time to indicate an immediate deadline
 * hint (i.e. forward progress cannot be made until this fence is signaled).
 *
 * Multiple deadlines may be set on a given fence, even in parallel.  See the
 * documentation for &dma_fence_ops.set_deadline.
 *
 * The deadline hint is just that, a hint.  The driver that created the fence
 * may react by increasing frequency, making different scheduling choices, etc.
 * Or doing nothing at all.
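 *
 * For example (a sketch; next_vblank_time() is a hypothetical helper), a
 * compositor or display driver might pass the upcoming vblank as the hint::
 *
 *     dma_fence_set_deadline(fence, next_vblank_time(crtc));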
 */

/**
 * dma_fence_set_deadline - set desired fence-wait deadline hint
 * @fence:    the fence that is to be waited on
 * @deadline: the time by which the waiter hopes for the fence to be
 *            signaled
 *
 * Give the fence signaler a hint about an upcoming deadline, such as
 * vblank, by which point the waiter would prefer the fence to be
 * signaled. This is intended to give feedback to the fence signaler
 * to aid in power management decisions, such as boosting GPU frequency
 * if a periodic vblank deadline is approaching but the fence is not
 * yet signaled.
 */
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
	if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
		fence->ops->set_deadline(fence, deadline);
}
EXPORT_SYMBOL(dma_fence_set_deadline);

/**
 * dma_fence_describe - Dump fence description into seq_file
 * @fence: the fence to describe
 * @seq: the seq_file to put the textual description into
 *
 * Dump a textual description of the fence and its state into the seq_file.
 */
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
	const char __rcu *timeline;
	const char __rcu *driver;

	rcu_read_lock();

	timeline = dma_fence_timeline_name(fence);
	driver = dma_fence_driver_name(fence);

	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
		   rcu_dereference(driver),
		   rcu_dereference(timeline),
		   fence->seqno,
		   dma_fence_is_signaled(fence) ? "" : "un");

	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_fence_describe);

static void
__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
		 spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = flags;
	fence->error = 0;

	trace_dma_fence_init(fence);
}

/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Initializes an allocated fence, the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, u64 seqno)
{
	__dma_fence_init(fence, ops, lock, context, seqno, 0UL);
}
EXPORT_SYMBOL(dma_fence_init);

/**
 * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Initializes an allocated fence, the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * Context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
		 spinlock_t *lock, u64 context, u64 seqno)
{
	__dma_fence_init(fence, ops, lock, context, seqno,
			 BIT(DMA_FENCE_FLAG_SEQNO64_BIT));
}
EXPORT_SYMBOL(dma_fence_init64);

/**
 * dma_fence_driver_name - Access the driver name
 * @fence: the fence to query
 *
 * Returns a driver name backing the dma-fence implementation.
 *
 * IMPORTANT CONSIDERATION:
 * Dma-fence contract stipulates that access to driver provided data (data not
 * directly embedded into the object itself), such as the &dma_fence.lock and
 * memory potentially accessed by the &dma_fence.ops functions, is forbidden
 * after the fence has been signalled. Drivers are allowed to free that data,
 * and some do.
 *
 * To allow safe access, drivers are mandated to guarantee a RCU grace period
 * between signalling the fence and freeing said data.
 *
 * As such access to the driver name is only valid inside a RCU locked section.
 * The pointer MUST be both queried and USED ONLY WITHIN a SINGLE block guarded
 * by the &rcu_read_lock and &rcu_read_unlock pair.
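 *
 * A sketch of safe access (mirroring what dma_fence_describe() does)::
 *
 *     rcu_read_lock();
 *     name = dma_fence_driver_name(fence);
 *     seq_printf(seq, "%s", rcu_dereference(name));
 *     rcu_read_unlock();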
 */
const char __rcu *dma_fence_driver_name(struct dma_fence *fence)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "RCU protection is required for safe access to returned string");

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return fence->ops->get_driver_name(fence);
	else
		return "detached-driver";
}
EXPORT_SYMBOL(dma_fence_driver_name);

/**
 * dma_fence_timeline_name - Access the timeline name
 * @fence: the fence to query
 *
 * Returns a timeline name provided by the dma-fence implementation.
 *
 * IMPORTANT CONSIDERATION:
 * Dma-fence contract stipulates that access to driver provided data (data not
 * directly embedded into the object itself), such as the &dma_fence.lock and
 * memory potentially accessed by the &dma_fence.ops functions, is forbidden
 * after the fence has been signalled. Drivers are allowed to free that data,
 * and some do.
 *
 * To allow safe access, drivers are mandated to guarantee a RCU grace period
 * between signalling the fence and freeing said data.
 *
 * As such access to the timeline name is only valid inside a RCU locked
 * section. The pointer MUST be both queried and USED ONLY WITHIN a SINGLE
 * block guarded by the &rcu_read_lock and &rcu_read_unlock pair.
 */
const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "RCU protection is required for safe access to returned string");

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return fence->ops->get_timeline_name(fence);
	else
		return "signaled-timeline";
}
EXPORT_SYMBOL(dma_fence_timeline_name);
|---|