// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008,2010 Intel Corporation
 */

#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_auth.h>
#include <drm/drm_syncobj.h>

#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"

#include "pxp/intel_pxp.h"

#include "i915_cmd_parser.h"
#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_evict.h"
#include "i915_gem_ioctls.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

struct eb_vma {
	struct i915_vma *vma;
	unsigned int flags;

	/** This vma's place in the execbuf reservation list */
	struct drm_i915_gem_exec_object2 *exec;
	struct list_head bind_link;
	struct list_head reloc_link;

	struct hlist_node node;
	u32 handle;
};

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

/* __EXEC_OBJECT_ flags > BIT(29) defined in i915_vma.h */
#define __EXEC_OBJECT_HAS_PIN		BIT(29)
#define __EXEC_OBJECT_HAS_FENCE		BIT(28)
#define __EXEC_OBJECT_USERPTR_INIT	BIT(27)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(26)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(25)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 25) /* all of the above + */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_ENGINE_PINNED	BIT(30)
#define __EXEC_USERPTR_USED	BIT(29)
#define __EXEC_INTERNAL_FLAGS	(~0u << 29)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK  | \
	 I915_EXEC_RESOURCE_STREAMER)

/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute is to add content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
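 *
 * As a rough illustration only (the exact commands emitted differ per engine
 * and platform, and the context load is handled by the submission backend),
 * the ring contents built up around a single batch resemble::
 *
 *	<cache invalidation flush>
 *	MI_BATCH_BUFFER_START	<GPU address of the batchbuffer>
 *	<pipeline flush>
 *	MI_STORE_DWORD_IMM	<write the request seqno to memory>
 *	MI_USER_INTERRUPT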
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
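 *
 * For illustration, a minimal userspace submission honouring these rules
 * might look like the sketch below (uapi structs from
 * include/uapi/drm/i915_drm.h; handles, offsets and error handling are
 * placeholders and omitted)::
 *
 *	struct drm_i915_gem_exec_object2 exec[2] = {
 *		{ .handle = target_handle,	// written render target
 *		  .offset = presumed_target_offset,
 *		  .flags = EXEC_OBJECT_WRITE },
 *		{ .handle = batch_handle,	// batch is last by default
 *		  .offset = presumed_batch_offset },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)exec,
 *		.buffer_count = 2,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * When an object has moved, the kernel writes its new GPU address back into
 * execobject.offset on return; userspace should carry those values forward
 * as the presumed offsets for its next submission.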
 *
 * The reservation is done in multiple phases. First we try and keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the vm->mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fall back
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NO_RELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
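 *
 * Conceptually, each struct drm_i915_gem_relocation_entry is handled along
 * the lines of the following sketch (a simplified view of what the relocation
 * code later in this file does)::
 *
 *	target = eb_get_vma(eb, reloc->target_handle);
 *	if (reloc->presumed_offset == i915_vma_offset(target->vma))
 *		continue;	// userspace guessed right, nothing to patch
 *	// otherwise rewrite the 4 or 8 byte pointer at reloc->offset inside
 *	// the object to point at the target address plus reloc->delta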
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules, any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but the simplest approach is to wait on the CPU
 * until all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it
 * to the hardware (well, leaving it in a queue to be executed). However, we
 * also offer the ability for batchbuffers to be run with elevated privileges
 * so that they can access otherwise hidden registers. (Used to adjust L3 cache
 * etc.) Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions, we check that each instruction is from
 * our whitelist and all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */

struct eb_fence {
	struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */
	struct dma_fence *dma_fence;
	u64 value;
	struct dma_fence_chain *chain_fence;
};

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct eb_vma *vma;

	struct intel_gt *gt; /* gt for the execbuf */
	struct intel_context *context; /* logical state for the request */
	struct i915_gem_context *gem_context; /** caller's context */
	intel_wakeref_t wakeref;
	intel_wakeref_t wakeref_gt0;

	/** our requests to build */
	struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
	/** identity of the batch obj/vma */
	struct eb_vma *batches[MAX_ENGINE_INSTANCE + 1];
	struct i915_vma *trampoline; /** trampoline used for chaining */

	/** used for excl fence in dma_resv objects when > 1 BB submitted */
	struct dma_fence *composite_fence;

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/* number of batches in execbuf IOCTL */
	unsigned int num_batches;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	struct i915_gem_ww_ctx ww;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int graphics_ver; /** Cached value of GRAPHICS_VER */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */

	/** Length of batch within object */
	u64 batch_len[MAX_ENGINE_INSTANCE + 1];
	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_flags; /** Flags composed for emit_bb_start() */
	struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */

	struct eb_fence *fences;
	unsigned long num_fences;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1];
#endif
};

static int eb_parse(struct i915_execbuffer *eb);
static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
static void eb_unpin_engine(struct i915_execbuffer *eb);
static void eb_capture_release(struct i915_execbuffer *eb);

static bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_requires_cmd_parser(eb->context->engine) ||
		(intel_engine_using_cmd_parser(eb->context->engine) &&
		 eb->args->batch_len);
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			gfp_t flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	const u64 start = i915_vma_offset(vma);
	const u64 size = i915_vma_size(vma);

	if (size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (start + size + 4095) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
			unsigned int exec_flags)
{
	u64 pin_flags = 0;

	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED)
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	return pin_flags;
}

static int
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct eb_vma *ev)
{
	struct i915_vma *vma = ev->vma;
	u64 pin_flags;
	int err;

	if (vma->node.size)
		pin_flags = __i915_vma_offset(vma);
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED | PIN_VALIDATE;
	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	/* Attempt to reuse the current location if available */
	err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags);
	if (err == -EDEADLK)
		return err;

	if (unlikely(err)) {
		if (entry->flags & EXEC_OBJECT_PINNED)
			return err;

		/* Failing that pick any _free_ space if suitable */
		err = i915_vma_pin_ww(vma, &eb->ww,
				      entry->pad_to_size,
				      entry->alignment,
				      eb_pin_flags(entry, ev->flags) |
				      PIN_USER | PIN_NOEVICT | PIN_VALIDATE);
		if (unlikely(err))
			return err;
	}

	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err))
			return err;

		if (vma->fence)
			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	ev->flags |= __EXEC_OBJECT_HAS_PIN;
	if (eb_vma_misplaced(entry, vma, ev->flags))
		return -EBADSLT;

	return 0;
}

static void
eb_unreserve_vma(struct eb_vma *ev)
{
	if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(ev->vma);

	ev->flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	/* Relocations are disallowed for all platforms after TGL-LP.  This
	 * also covers all platforms with local memory.
	 */
	if (entry->relocation_count &&
	    GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
		return -EINVAL;

	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment &&
		     !is_power_of_2_u64(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}
	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	return 0;
}

static bool
is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx)
{
	return eb->args->flags & I915_EXEC_BATCH_FIRST ?
		buffer_idx < eb->num_batches :
		buffer_idx >= eb->args->buffer_count - eb->num_batches;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int *current_batch,
	   unsigned int i,
	   struct i915_vma *vma)
{
	struct drm_i915_private *i915 = eb->i915;
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	struct eb_vma *ev = &eb->vma[i];

	ev->vma = vma;
	ev->exec = entry;
	ev->flags = entry->flags;

	if (eb->lut_size > 0) {
		ev->handle = entry->handle;
		hlist_add_head(&ev->node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&ev->reloc_link, &eb->relocs);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (is_batch_buffer(eb, i)) {
		if (entry->relocation_count &&
		    !(ev->flags & EXEC_OBJECT_PINNED))
			ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			ev->flags |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batches[*current_batch] = ev;

		if (unlikely(ev->flags & EXEC_OBJECT_WRITE)) {
			drm_dbg(&i915->drm,
				"Attempting to use self-modifying batch buffer\n");
			return -EINVAL;
		}

		if (range_overflows_t(u64,
				      eb->batch_start_offset,
				      eb->args->batch_len,
				      ev->vma->size)) {
			drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
			return -EINVAL;
		}

		if (eb->args->batch_len == 0)
			eb->batch_len[*current_batch] = ev->vma->size -
				eb->batch_start_offset;
		else
			eb->batch_len[*current_batch] = eb->args->batch_len;
		if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! */
			drm_dbg(&i915->drm, "Invalid batch length\n");
			return -EINVAL;
		}

		++*current_batch;
	}

	return 0;
}

static int use_cpu_reloc(const struct reloc_cache *cache,
			 const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	/*
	 * For objects created by userspace through GEM_CREATE with pat_index
	 * set by the set_pat extension, i915_gem_object_has_cache_level()
	 * always returns true; otherwise the call falls back to checking
	 * whether the object is un-cached.
	 */
	return (cache->has_llc ||
		obj->cache_dirty ||
		!i915_gem_object_has_cache_level(obj, I915_CACHE_NONE));
}

static int eb_reserve_vma(struct i915_execbuffer *eb,
			  struct eb_vma *ev,
			  u64 pin_flags)
{
	struct drm_i915_gem_exec_object2 *entry = ev->exec;
	struct i915_vma *vma = ev->vma;
	int err;

	if (drm_mm_node_allocated(&vma->node) &&
	    eb_vma_misplaced(entry, vma, ev->flags)) {
		err = i915_vma_unbind(vma);
		if (err)
			return err;
	}

	err = i915_vma_pin_ww(vma, &eb->ww,
			      entry->pad_to_size, entry->alignment,
			      eb_pin_flags(entry, ev->flags) | pin_flags);
	if (err)
		return err;

	if (entry->offset != i915_vma_offset(vma)) {
		entry->offset = i915_vma_offset(vma) | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err))
			return err;

		if (vma->fence)
			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	ev->flags |= __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));

	return 0;
}

static bool eb_unbind(struct i915_execbuffer *eb, bool force)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	struct list_head last;
	bool unpinned = false;

	/* Resort *all* the objects into priority order */
	INIT_LIST_HEAD(&eb->unbound);
	INIT_LIST_HEAD(&last);

	for (i = 0; i < count; i++) {
		struct eb_vma *ev = &eb->vma[i];
		unsigned int flags = ev->flags;

		if (!force && flags & EXEC_OBJECT_PINNED &&
		    flags & __EXEC_OBJECT_HAS_PIN)
			continue;

		unpinned = true;
		eb_unreserve_vma(ev);

		if (flags & EXEC_OBJECT_PINNED)
			/* Pinned objects must have their slot */
			list_add(&ev->bind_link, &eb->unbound);
		else if (flags & __EXEC_OBJECT_NEEDS_MAP)
			/* Mappable objects require the lowest 256MiB (aperture) */
			list_add_tail(&ev->bind_link, &eb->unbound);
		else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
			/* Prioritise the 4GiB region for restricted bo */
			list_add(&ev->bind_link, &last);
		else
			list_add_tail(&ev->bind_link, &last);
	}

	list_splice_tail(&last, &eb->unbound);
	return unpinned;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	struct eb_vma *ev;
	unsigned int pass;
	int err = 0;

	/*
	 * We have one or more buffers that we couldn't bind, which could be due
	 * to various reasons. To resolve this we have 4 passes, with every next
	 * level turning the screws tighter:
	 *
	 * 0. Unbind all objects that do not match the GTT constraints for the
	 * execbuffer (fenceable, mappable, alignment etc). Bind all new
	 * objects.  This avoids unnecessary unbinding of later objects in order
	 * to make room for the earlier objects *unless* we need to defragment.
	 *
	 * 1. Reorder the buffers, where objects with the most restrictive
	 * placement requirements go first (ignoring fixed location buffers for
	 * now).  For example, objects needing the mappable aperture (the first
	 * 256M of GTT), should go first vs objects that can be placed just
	 * about anywhere. Repeat the previous pass.
	 *
	 * 2. Consider buffers that are pinned at a fixed location. Also try to
	 * evict the entire VM this time, leaving only objects that we were
	 * unable to lock. Try again to bind the buffers. (still using the new
	 * buffer order).
	 *
	 * 3. We likely have object lock contention for one or more stubborn
	 * objects in the VM, for which we need to evict to make forward
	 * progress (perhaps we are fighting the shrinker?). When evicting the
	 * VM this time around, anything that we can't lock we now track using
	 * the busy_bo, using the full lock (after dropping the vm->mutex to
	 * prevent deadlocks), instead of trylock. We then continue to evict the
	 * VM, this time with the stubborn object locked, which we can now
	 * hopefully unbind (if still bound in the VM). Repeat until the VM is
	 * evicted. Finally we should be able to bind everything.
	 */
	for (pass = 0; pass <= 3; pass++) {
		int pin_flags = PIN_USER | PIN_VALIDATE;

		if (pass == 0)
			pin_flags |= PIN_NONBLOCK;

		if (pass >= 1)
			eb_unbind(eb, pass >= 2);

		if (pass == 2) {
			err = mutex_lock_interruptible(&eb->context->vm->mutex);
			if (!err) {
				err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
				mutex_unlock(&eb->context->vm->mutex);
			}
			if (err)
				return err;
		}

		if (pass == 3) {
retry:
			err = mutex_lock_interruptible(&eb->context->vm->mutex);
			if (!err) {
				struct drm_i915_gem_object *busy_bo = NULL;

				err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
				mutex_unlock(&eb->context->vm->mutex);
				if (err && busy_bo) {
					err = i915_gem_object_lock(busy_bo, &eb->ww);
					i915_gem_object_put(busy_bo);
					if (!err)
						goto retry;
				}
			}
			if (err)
				return err;
		}

		list_for_each_entry(ev, &eb->unbound, bind_link) {
			err = eb_reserve_vma(eb, ev, pin_flags);
			if (err)
				break;
		}

		if (err != -ENOSPC)
			break;
	}

	return err;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	eb->gem_context = ctx;
	if (i915_gem_context_has_full_ppgtt(ctx))
		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	return 0;
}

static int __eb_add_lut(struct i915_execbuffer *eb,
			u32 handle, struct i915_vma *vma)
{
	struct i915_gem_context *ctx = eb->gem_context;
	struct i915_lut_handle *lut;
	int err;

	lut = i915_lut_handle_alloc();
	if (unlikely(!lut))
		return -ENOMEM;

	i915_vma_get(vma);
	if (!atomic_fetch_inc(&vma->open_count))
		i915_vma_reopen(vma);
	lut->handle = handle;
	lut->ctx = ctx;

	/* Check that the context hasn't been closed in the meantime */
	err = -EINTR;
	if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
		if (likely(!i915_gem_context_is_closed(ctx)))
			err = radix_tree_insert(&ctx->handles_vma, handle, vma);
		else
			err = -ENOENT;
		if (err == 0) { /* And nor has this handle */
			struct drm_i915_gem_object *obj = vma->obj;

			spin_lock(&obj->lut_lock);
			if (idr_find(&eb->file->object_idr, handle) == obj) {
				list_add(&lut->obj_link, &obj->lut_list);
			} else {
				radix_tree_delete(&ctx->handles_vma, handle);
				err = -ENOENT;
			}
			spin_unlock(&obj->lut_lock);
		}
		mutex_unlock(&ctx->lut_mutex);
	}
	if (unlikely(err))
		goto err;

	return 0;

err:
	i915_vma_close(vma);
	i915_vma_put(vma);
	i915_lut_handle_free(lut);
	return err;
}

static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
{
	struct i915_address_space *vm = eb->context->vm;

	do {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		int err;

		rcu_read_lock();
		vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
		if (likely(vma && vma->vm == vm))
			vma = i915_vma_tryget(vma);
		rcu_read_unlock();
		if (likely(vma))
			return vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj))
			return ERR_PTR(-ENOENT);

		/*
		 * If the user has opted-in for protected-object tracking, make
		 * sure the object encryption can be used.
		 * We only need to do this when the object is first used with
		 * this context, because the context itself will be banned when
		 * the protected objects become invalid.
		 */
		if (i915_gem_context_uses_protected_content(eb->gem_context) &&
		    i915_gem_object_is_protected(obj)) {
			err = intel_pxp_key_check(intel_bo_to_drm_bo(obj), true);
			if (err) {
				i915_gem_object_put(obj);
				return ERR_PTR(err);
			}
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			return vma;
		}

		err = __eb_add_lut(eb, handle, vma);
		if (likely(!err))
			return vma;

		i915_gem_object_put(obj);
		if (err != -EEXIST)
			return ERR_PTR(err);
	} while (1);
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	unsigned int i, current_batch = 0;
	int err = 0;

	INIT_LIST_HEAD(&eb->relocs);

	for (i = 0; i < eb->buffer_count; i++) {
		struct i915_vma *vma;

		vma = eb_lookup_vma(eb, eb->exec[i].handle);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err;
		}

		err = eb_validate_vma(eb, &eb->exec[i], vma);
		if (unlikely(err)) {
			i915_vma_put(vma);
			goto err;
		}

		err = eb_add_vma(eb, &current_batch, i, vma);
		if (err)
			return err;

		if (i915_gem_object_is_userptr(vma->obj)) {
			err = i915_gem_object_userptr_submit_init(vma->obj);
			if (err) {
				if (i + 1 < eb->buffer_count) {
					/*
					 * Execbuffer code expects last vma entry to be NULL,
					 * since we already initialized this entry,
					 * set the next value to NULL or we mess up
					 * cleanup handling.
					 */
					eb->vma[i + 1].vma = NULL;
				}

				return err;
			}

			eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
			eb->args->flags |= __EXEC_USERPTR_USED;
		}
	}

	return 0;

err:
	eb->vma[i].vma = NULL;
	return err;
}

static int eb_lock_vmas(struct i915_execbuffer *eb)
{
	unsigned int i;
	int err;

	for (i = 0; i < eb->buffer_count; i++) {
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;

		err = i915_gem_object_lock(vma->obj, &eb->ww);
		if (err)
			return err;
	}

	return 0;
}

static int eb_validate_vmas(struct i915_execbuffer *eb)
{
	unsigned int i;
	int err;

	INIT_LIST_HEAD(&eb->unbound);

	err = eb_lock_vmas(eb);
	if (err)
		return err;

	for (i = 0; i < eb->buffer_count; i++) {
		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;

		err = eb_pin_vma(eb, entry, ev);
		if (err == -EDEADLK)
			return err;

		if (!err) {
			if (entry->offset != i915_vma_offset(vma)) {
				entry->offset = i915_vma_offset(vma) | UPDATE;
				eb->args->flags |= __EXEC_HAS_RELOC;
			}
		} else {
			eb_unreserve_vma(ev);

			list_add_tail(&ev->bind_link, &eb->unbound);
			if (drm_mm_node_allocated(&vma->node)) {
				err = i915_vma_unbind(vma);
				if (err)
					return err;
			}
		}

		/* Reserve enough slots to accommodate composite fences */
		err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
		if (err)
			return err;

		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
			   eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
	}

	if (!list_empty(&eb->unbound))
		return eb_reserve(eb);

	return 0;
}

static struct eb_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return &eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct eb_vma *ev;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(ev, head, node) {
			if (ev->handle == handle)
				return ev;
		}
		return NULL;
	}
}

static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;

		if (!vma)
			break;

		eb_unreserve_vma(ev);

		if (final)
			i915_vma_put(vma);
	}

	eb_capture_release(eb);
	eb_unpin_engine(eb);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + i915_vma_offset(target));
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->graphics_ver = GRAPHICS_VER(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->graphics_ver < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.flags = 0;
}

static void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return to_gt(i915)->ggtt;
}

static void reloc_cache_unmap(struct reloc_cache *cache)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP)
		kunmap_local(vaddr);
	else
		io_mapping_unmap_atomic((void __iomem *)vaddr);
}

static void reloc_cache_remap(struct reloc_cache *cache,
			      struct drm_i915_gem_object *obj)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	if (cache->vaddr & KMAP) {
		struct page *page = i915_gem_object_get_page(obj, cache->page);

		vaddr = kmap_local_page(page);
		cache->vaddr = unmask_flags(cache->vaddr) |
			(unsigned long)vaddr;
	} else {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
		unsigned long offset;

		offset = cache->node.start;
		if (!drm_mm_node_allocated(&cache->node))
			offset += cache->page << PAGE_SHIFT;

		cache->vaddr = (unsigned long)
			io_mapping_map_atomic_wc(&ggtt->iomap, offset);
	}
}

static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		struct drm_i915_gem_object *obj =
			(struct drm_i915_gem_object *)cache->node.mm;
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_local(vaddr);
		i915_gem_object_finish_access(obj);
	} else {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);

		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __iomem *)vaddr);

		if (drm_mm_node_allocated(&cache->node)) {
			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			mutex_lock(&ggtt->vm.mutex);
			drm_mm_remove_node(&cache->node);
			mutex_unlock(&ggtt->vm.mutex);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}
| 1228 |  | 
|---|
| 1229 | static void *reloc_kmap(struct drm_i915_gem_object *obj, | 
|---|
| 1230 | struct reloc_cache *cache, | 
|---|
| 1231 | unsigned long pageno) | 
|---|
| 1232 | { | 
|---|
| 1233 | void *vaddr; | 
|---|
| 1234 | struct page *page; | 
|---|
| 1235 |  | 
|---|
| 1236 | if (cache->vaddr) { | 
|---|
| 1237 | kunmap_local(unmask_page(cache->vaddr)); | 
|---|
| 1238 | } else { | 
|---|
| 1239 | unsigned int flushes; | 
|---|
| 1240 | int err; | 
|---|
| 1241 |  | 
|---|
| 1242 | err = i915_gem_object_prepare_write(obj, needs_clflush: &flushes); | 
|---|
| 1243 | if (err) | 
|---|
| 1244 | return ERR_PTR(error: err); | 
|---|
| 1245 |  | 
|---|
| 1246 | BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS); | 
|---|
| 1247 | BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK); | 
|---|
| 1248 |  | 
|---|
| 1249 | cache->vaddr = flushes | KMAP; | 
|---|
| 1250 | cache->node.mm = (void *)obj; | 
|---|
| 1251 | if (flushes) | 
|---|
| 1252 | mb(); | 
|---|
| 1253 | } | 
|---|
| 1254 |  | 
|---|
| 1255 | page = i915_gem_object_get_page(obj, pageno); | 
|---|
| 1256 | if (!obj->mm.dirty) | 
|---|
| 1257 | set_page_dirty(page); | 
|---|
| 1258 |  | 
|---|
| 1259 | vaddr = kmap_local_page(page); | 
|---|
| 1260 | cache->vaddr = unmask_flags(p: cache->vaddr) | (unsigned long)vaddr; | 
|---|
| 1261 | cache->page = pageno; | 
|---|
| 1262 |  | 
|---|
| 1263 | return vaddr; | 
|---|
| 1264 | } | 
|---|
| 1265 |  | 
|---|
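reloc_kmap() and the helpers around it stash the clflush state and a KMAP marker in the low, page-offset bits of the cached mapping address; the two BUILD_BUG_ON()s above guarantee those flag bits never collide with the pointer bits. Below is a minimal user-space sketch of the same packing trick; MY_PAGE_MASK, MY_KMAP and MY_CLFLUSH_BITS are made-up stand-ins, not the driver's constants.

```c
#include <assert.h>
#include <stdint.h>

#define MY_PAGE_MASK	0xfffUL		/* low 12 bits on a 4 KiB page */
#define MY_CLFLUSH_BITS	0x3UL		/* stand-in for CLFLUSH_BEFORE/AFTER */
#define MY_KMAP		0x4UL		/* stand-in for the KMAP marker */

static unsigned long pack_vaddr(void *page_aligned, unsigned long flags)
{
	assert(((uintptr_t)page_aligned & MY_PAGE_MASK) == 0);	/* page aligned */
	assert((flags & ~MY_PAGE_MASK) == 0);	/* flags must fit in the offset bits */
	return (uintptr_t)page_aligned | flags;
}

static void *unpack_page(unsigned long packed)
{
	return (void *)(packed & ~MY_PAGE_MASK);
}

static unsigned long unpack_flags(unsigned long packed)
{
	return packed & MY_PAGE_MASK;
}
```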
| 1266 | static void *reloc_iomap(struct i915_vma *batch, | 
|---|
| 1267 | struct i915_execbuffer *eb, | 
|---|
| 1268 | unsigned long page) | 
|---|
| 1269 | { | 
|---|
| 1270 | struct drm_i915_gem_object *obj = batch->obj; | 
|---|
| 1271 | struct reloc_cache *cache = &eb->reloc_cache; | 
|---|
| 1272 | struct i915_ggtt *ggtt = cache_to_ggtt(cache); | 
|---|
| 1273 | unsigned long offset; | 
|---|
| 1274 | void *vaddr; | 
|---|
| 1275 |  | 
|---|
| 1276 | if (cache->vaddr) { | 
|---|
| 1277 | intel_gt_flush_ggtt_writes(gt: ggtt->vm.gt); | 
|---|
| 1278 | io_mapping_unmap_atomic(vaddr: (void __force __iomem *) unmask_page(p: cache->vaddr)); | 
|---|
| 1279 | } else { | 
|---|
| 1280 | struct i915_vma *vma = ERR_PTR(error: -ENODEV); | 
|---|
| 1281 | int err; | 
|---|
| 1282 |  | 
|---|
| 1283 | if (i915_gem_object_is_tiled(obj)) | 
|---|
| 1284 | return ERR_PTR(error: -EINVAL); | 
|---|
| 1285 |  | 
|---|
| 1286 | if (use_cpu_reloc(cache, obj)) | 
|---|
| 1287 | return NULL; | 
|---|
| 1288 |  | 
|---|
| 1289 | err = i915_gem_object_set_to_gtt_domain(obj, write: true); | 
|---|
| 1290 | if (err) | 
|---|
| 1291 | return ERR_PTR(error: err); | 
|---|
| 1292 |  | 
|---|
| 1293 | /* | 
|---|
| 1294 | * i915_gem_object_ggtt_pin_ww may attempt to remove the batch | 
|---|
| 1295 | * VMA from the object list because we no longer pin. | 
|---|
| 1296 | * | 
|---|
| 1297 | * Only attempt to pin the batch buffer to ggtt if the current batch | 
|---|
| 1298 | * is not inside ggtt, or the batch buffer is not misplaced. | 
|---|
| 1299 | */ | 
|---|
| 1300 | if (!i915_is_ggtt(batch->vm) || | 
|---|
| 1301 | !i915_vma_misplaced(vma: batch, size: 0, alignment: 0, PIN_MAPPABLE)) { | 
|---|
| 1302 | vma = i915_gem_object_ggtt_pin_ww(obj, ww: &eb->ww, NULL, size: 0, alignment: 0, | 
|---|
| 1303 | PIN_MAPPABLE | | 
|---|
| 1304 | PIN_NONBLOCK /* NOWARN */ | | 
|---|
| 1305 | PIN_NOEVICT); | 
|---|
| 1306 | } | 
|---|
| 1307 |  | 
|---|
| 1308 | if (vma == ERR_PTR(error: -EDEADLK)) | 
|---|
| 1309 | return vma; | 
|---|
| 1310 |  | 
|---|
| 1311 | if (IS_ERR(ptr: vma)) { | 
|---|
| 1312 | memset(s: &cache->node, c: 0, n: sizeof(cache->node)); | 
|---|
| 1313 | mutex_lock(lock: &ggtt->vm.mutex); | 
|---|
| 1314 | err = drm_mm_insert_node_in_range | 
|---|
| 1315 | (mm: &ggtt->vm.mm, node: &cache->node, | 
|---|
| 1316 | PAGE_SIZE, alignment: 0, I915_COLOR_UNEVICTABLE, | 
|---|
| 1317 | start: 0, end: ggtt->mappable_end, | 
|---|
| 1318 | mode: DRM_MM_INSERT_LOW); | 
|---|
| 1319 | mutex_unlock(lock: &ggtt->vm.mutex); | 
|---|
| 1320 | if (err) /* no inactive aperture space, use cpu reloc */ | 
|---|
| 1321 | return NULL; | 
|---|
| 1322 | } else { | 
|---|
| 1323 | cache->node.start = i915_ggtt_offset(vma); | 
|---|
| 1324 | cache->node.mm = (void *)vma; | 
|---|
| 1325 | } | 
|---|
| 1326 | } | 
|---|
| 1327 |  | 
|---|
| 1328 | offset = cache->node.start; | 
|---|
| 1329 | if (drm_mm_node_allocated(node: &cache->node)) { | 
|---|
| 1330 | ggtt->vm.insert_page(&ggtt->vm, | 
|---|
| 1331 | i915_gem_object_get_dma_address(obj, page), | 
|---|
| 1332 | offset, | 
|---|
| 1333 | i915_gem_get_pat_index(i915: ggtt->vm.i915, | 
|---|
| 1334 | level: I915_CACHE_NONE), | 
|---|
| 1335 | 0); | 
|---|
| 1336 | } else { | 
|---|
| 1337 | offset += page << PAGE_SHIFT; | 
|---|
| 1338 | } | 
|---|
| 1339 |  | 
|---|
| 1340 | vaddr = (void __force *)io_mapping_map_atomic_wc(mapping: &ggtt->iomap, | 
|---|
| 1341 | offset); | 
|---|
| 1342 | cache->page = page; | 
|---|
| 1343 | cache->vaddr = (unsigned long)vaddr; | 
|---|
| 1344 |  | 
|---|
| 1345 | return vaddr; | 
|---|
| 1346 | } | 
|---|
| 1347 |  | 
|---|
| 1348 | static void *reloc_vaddr(struct i915_vma *vma, | 
|---|
| 1349 | struct i915_execbuffer *eb, | 
|---|
| 1350 | unsigned long page) | 
|---|
| 1351 | { | 
|---|
| 1352 | struct reloc_cache *cache = &eb->reloc_cache; | 
|---|
| 1353 | void *vaddr; | 
|---|
| 1354 |  | 
|---|
| 1355 | if (cache->page == page) { | 
|---|
| 1356 | vaddr = unmask_page(p: cache->vaddr); | 
|---|
| 1357 | } else { | 
|---|
| 1358 | vaddr = NULL; | 
|---|
| 1359 | if ((cache->vaddr & KMAP) == 0) | 
|---|
| 1360 | vaddr = reloc_iomap(batch: vma, eb, page); | 
|---|
| 1361 | if (!vaddr) | 
|---|
| 1362 | vaddr = reloc_kmap(obj: vma->obj, cache, pageno: page); | 
|---|
| 1363 | } | 
|---|
| 1364 |  | 
|---|
| 1365 | return vaddr; | 
|---|
| 1366 | } | 
|---|
| 1367 |  | 
|---|
| 1368 | static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) | 
|---|
| 1369 | { | 
|---|
| 1370 | if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) { | 
|---|
| 1371 | if (flushes & CLFLUSH_BEFORE) | 
|---|
| 1372 | drm_clflush_virt_range(addr, length: sizeof(*addr)); | 
|---|
| 1373 |  | 
|---|
| 1374 | *addr = value; | 
|---|
| 1375 |  | 
|---|
| 1376 | /* | 
|---|
| 1377 | * Writes to the same cacheline are serialised by the CPU | 
|---|
| 1378 | * (including clflush). On the write path, we only require | 
|---|
| 1379 | * that it hits memory in an orderly fashion and place | 
|---|
| 1380 | * mb barriers at the start and end of the relocation phase | 
|---|
| 1381 | * to ensure ordering of clflush wrt the system. | 
|---|
| 1382 | */ | 
|---|
| 1383 | if (flushes & CLFLUSH_AFTER) | 
|---|
| 1384 | drm_clflush_virt_range(addr, length: sizeof(*addr)); | 
|---|
| 1385 | } else { | 
|---|
| 1386 | *addr = value; | 
|---|
| 1387 | } | 
|---|
| 1388 | } | 
|---|
| 1389 |  | 
|---|
| 1390 | static u64 | 
|---|
| 1391 | relocate_entry(struct i915_vma *vma, | 
|---|
| 1392 | const struct drm_i915_gem_relocation_entry *reloc, | 
|---|
| 1393 | struct i915_execbuffer *eb, | 
|---|
| 1394 | const struct i915_vma *target) | 
|---|
| 1395 | { | 
|---|
| 1396 | u64 target_addr = relocation_target(reloc, target); | 
|---|
| 1397 | u64 offset = reloc->offset; | 
|---|
| 1398 | bool wide = eb->reloc_cache.use_64bit_reloc; | 
|---|
| 1399 | void *vaddr; | 
|---|
| 1400 |  | 
|---|
| 1401 | repeat: | 
|---|
| 1402 | vaddr = reloc_vaddr(vma, eb, | 
|---|
| 1403 | page: offset >> PAGE_SHIFT); | 
|---|
| 1404 | if (IS_ERR(ptr: vaddr)) | 
|---|
| 1405 | return PTR_ERR(ptr: vaddr); | 
|---|
| 1406 |  | 
|---|
| 1407 | GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32))); | 
|---|
| 1408 | clflush_write32(addr: vaddr + offset_in_page(offset), | 
|---|
| 1409 | lower_32_bits(target_addr), | 
|---|
| 1410 | flushes: eb->reloc_cache.vaddr); | 
|---|
| 1411 |  | 
|---|
| 1412 | if (wide) { | 
|---|
| 1413 | offset += sizeof(u32); | 
|---|
| 1414 | target_addr >>= 32; | 
|---|
| 1415 | wide = false; | 
|---|
| 1416 | goto repeat; | 
|---|
| 1417 | } | 
|---|
| 1418 |  | 
|---|
| 1419 | return target->node.start | UPDATE; | 
|---|
| 1420 | } | 
|---|
| 1421 |  | 
|---|
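When use_64bit_reloc is set, relocate_entry() writes the 64-bit target address as two 32-bit dwords: the low half at reloc->offset and the high half at reloc->offset + 4, which is what the wide/goto-repeat loop implements. A small self-contained sketch of that split on a plain byte buffer (the buffer and helper name are illustrative, not driver code):

```c
#include <stdint.h>
#include <string.h>

/* Mirror the two passes of the "wide" relocation loop. */
static void write_reloc64(uint8_t *obj, uint64_t offset, uint64_t target_addr)
{
	uint32_t lo = (uint32_t)target_addr;		/* first pass            */
	uint32_t hi = (uint32_t)(target_addr >> 32);	/* second pass, offset+4 */

	memcpy(obj + offset, &lo, sizeof(lo));
	memcpy(obj + offset + sizeof(uint32_t), &hi, sizeof(hi));
}
```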
| 1422 | static u64 | 
|---|
| 1423 | eb_relocate_entry(struct i915_execbuffer *eb, | 
|---|
| 1424 | struct eb_vma *ev, | 
|---|
| 1425 | const struct drm_i915_gem_relocation_entry *reloc) | 
|---|
| 1426 | { | 
|---|
| 1427 | struct drm_i915_private *i915 = eb->i915; | 
|---|
| 1428 | struct eb_vma *target; | 
|---|
| 1429 | int err; | 
|---|
| 1430 |  | 
|---|
| 1431 | /* we already hold a reference to all valid objects */ | 
|---|
| 1432 | target = eb_get_vma(eb, handle: reloc->target_handle); | 
|---|
| 1433 | if (unlikely(!target)) | 
|---|
| 1434 | return -ENOENT; | 
|---|
| 1435 |  | 
|---|
| 1436 | /* Validate that the target is in a valid r/w GPU domain */ | 
|---|
| 1437 | if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { | 
|---|
| 1438 | drm_dbg(&i915->drm, "reloc with multiple write domains: " | 
|---|
| 1439 | "target %d offset %d " | 
|---|
| 1440 | "read %08x write %08x\n", | 
|---|
| 1441 | reloc->target_handle, | 
|---|
| 1442 | (int) reloc->offset, | 
|---|
| 1443 | reloc->read_domains, | 
|---|
| 1444 | reloc->write_domain); | 
|---|
| 1445 | return -EINVAL; | 
|---|
| 1446 | } | 
|---|
| 1447 | if (unlikely((reloc->write_domain | reloc->read_domains) | 
|---|
| 1448 | & ~I915_GEM_GPU_DOMAINS)) { | 
|---|
| 1449 | drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: " | 
|---|
| 1450 | "target %d offset %d " | 
|---|
| 1451 | "read %08x write %08x\n", | 
|---|
| 1452 | reloc->target_handle, | 
|---|
| 1453 | (int) reloc->offset, | 
|---|
| 1454 | reloc->read_domains, | 
|---|
| 1455 | reloc->write_domain); | 
|---|
| 1456 | return -EINVAL; | 
|---|
| 1457 | } | 
|---|
| 1458 |  | 
|---|
| 1459 | if (reloc->write_domain) { | 
|---|
| 1460 | target->flags |= EXEC_OBJECT_WRITE; | 
|---|
| 1461 |  | 
|---|
| 1462 | /* | 
|---|
| 1463 | * Sandybridge PPGTT errata: We need a global gtt mapping | 
|---|
| 1464 | * for MI and pipe_control writes because the gpu doesn't | 
|---|
| 1465 | * properly redirect them through the ppgtt for non_secure | 
|---|
| 1466 | * batchbuffers. | 
|---|
| 1467 | */ | 
|---|
| 1468 | if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && | 
|---|
| 1469 | GRAPHICS_VER(eb->i915) == 6 && | 
|---|
| 1470 | !i915_vma_is_bound(vma: target->vma, I915_VMA_GLOBAL_BIND)) { | 
|---|
| 1471 | struct i915_vma *vma = target->vma; | 
|---|
| 1472 |  | 
|---|
| 1473 | reloc_cache_unmap(cache: &eb->reloc_cache); | 
|---|
| 1474 | mutex_lock(lock: &vma->vm->mutex); | 
|---|
| 1475 | err = i915_vma_bind(vma: target->vma, | 
|---|
| 1476 | pat_index: target->vma->obj->pat_index, | 
|---|
| 1477 | PIN_GLOBAL, NULL, NULL); | 
|---|
| 1478 | mutex_unlock(lock: &vma->vm->mutex); | 
|---|
| 1479 | reloc_cache_remap(cache: &eb->reloc_cache, obj: ev->vma->obj); | 
|---|
| 1480 | if (err) | 
|---|
| 1481 | return err; | 
|---|
| 1482 | } | 
|---|
| 1483 | } | 
|---|
| 1484 |  | 
|---|
| 1485 | /* | 
|---|
| 1486 | * If the relocation already has the right value in it, no | 
|---|
| 1487 | * more work needs to be done. | 
|---|
| 1488 | */ | 
|---|
| 1489 | if (!DBG_FORCE_RELOC && | 
|---|
| 1490 | gen8_canonical_addr(address: i915_vma_offset(vma: target->vma)) == reloc->presumed_offset) | 
|---|
| 1491 | return 0; | 
|---|
| 1492 |  | 
|---|
| 1493 | /* Check that the relocation address is valid... */ | 
|---|
| 1494 | if (unlikely(reloc->offset > | 
|---|
| 1495 | ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) { | 
|---|
| 1496 | drm_dbg(&i915->drm, "Relocation beyond object bounds: " | 
|---|
| 1497 | "target %d offset %d size %d.\n", | 
|---|
| 1498 | reloc->target_handle, | 
|---|
| 1499 | (int)reloc->offset, | 
|---|
| 1500 | (int)ev->vma->size); | 
|---|
| 1501 | return -EINVAL; | 
|---|
| 1502 | } | 
|---|
| 1503 | if (unlikely(reloc->offset & 3)) { | 
|---|
| 1504 | drm_dbg(&i915->drm, "Relocation not 4-byte aligned: " | 
|---|
| 1505 | "target %d offset %d.\n", | 
|---|
| 1506 | reloc->target_handle, | 
|---|
| 1507 | (int)reloc->offset); | 
|---|
| 1508 | return -EINVAL; | 
|---|
| 1509 | } | 
|---|
| 1510 |  | 
|---|
| 1511 | /* | 
|---|
| 1512 | * If we write into the object, we need to force the synchronisation | 
|---|
| 1513 | * barrier, either with an asynchronous clflush or if we executed the | 
|---|
| 1514 | * patching using the GPU (though that should be serialised by the | 
|---|
| 1515 | * timeline). To be completely sure, and since we are required to | 
|---|
| 1516 | * do relocations we are already stalling, disable the user's opt | 
|---|
| 1517 | * out of our synchronisation. | 
|---|
| 1518 | */ | 
|---|
| 1519 | ev->flags &= ~EXEC_OBJECT_ASYNC; | 
|---|
| 1520 |  | 
|---|
| 1521 | /* and update the user's relocation entry */ | 
|---|
| 1522 | return relocate_entry(vma: ev->vma, reloc, eb, target: target->vma); | 
|---|
| 1523 | } | 
|---|
| 1524 |  | 
|---|
| 1525 | static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) | 
|---|
| 1526 | { | 
|---|
| 1527 | #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) | 
|---|
| 1528 | struct drm_i915_gem_relocation_entry stack[N_RELOC(512)]; | 
|---|
| 1529 | const struct drm_i915_gem_exec_object2 *entry = ev->exec; | 
|---|
| 1530 | struct drm_i915_gem_relocation_entry __user *urelocs = | 
|---|
| 1531 | u64_to_user_ptr(entry->relocs_ptr); | 
|---|
| 1532 | unsigned long remain = entry->relocation_count; | 
|---|
| 1533 |  | 
|---|
| 1534 | if (unlikely(remain > N_RELOC(INT_MAX))) | 
|---|
| 1535 | return -EINVAL; | 
|---|
| 1536 |  | 
|---|
| 1537 | /* | 
|---|
| 1538 | * We must check that the entire relocation array is safe | 
|---|
| 1539 | * to read. However, if the array is not writable the user loses | 
|---|
| 1540 | * the updated relocation values. | 
|---|
| 1541 | */ | 
|---|
| 1542 | if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs)))) | 
|---|
| 1543 | return -EFAULT; | 
|---|
| 1544 |  | 
|---|
| 1545 | do { | 
|---|
| 1546 | struct drm_i915_gem_relocation_entry *r = stack; | 
|---|
| 1547 | unsigned int count = | 
|---|
| 1548 | min_t(unsigned long, remain, ARRAY_SIZE(stack)); | 
|---|
| 1549 | unsigned int copied; | 
|---|
| 1550 |  | 
|---|
| 1551 | /* | 
|---|
| 1552 | * This is the fast path and we cannot handle a pagefault | 
|---|
| 1553 | * whilst holding the struct mutex lest the user pass in the | 
|---|
| 1554 | * relocations contained within an mmaped bo. In such a case, | 
|---|
| 1555 | * the page fault handler would call i915_gem_fault() and | 
|---|
| 1556 | * we would try to acquire the struct mutex again. Obviously | 
|---|
| 1557 | * this is bad and so lockdep complains vehemently. | 
|---|
| 1558 | */ | 
|---|
| 1559 | pagefault_disable(); | 
|---|
| 1560 | copied = __copy_from_user_inatomic(to: r, from: urelocs, n: count * sizeof(r[0])); | 
|---|
| 1561 | pagefault_enable(); | 
|---|
| 1562 | if (unlikely(copied)) { | 
|---|
| 1563 | remain = -EFAULT; | 
|---|
| 1564 | goto out; | 
|---|
| 1565 | } | 
|---|
| 1566 |  | 
|---|
| 1567 | remain -= count; | 
|---|
| 1568 | do { | 
|---|
| 1569 | u64 offset = eb_relocate_entry(eb, ev, reloc: r); | 
|---|
| 1570 |  | 
|---|
| 1571 | if (likely(offset == 0)) | 
|---|
| 1572 | continue; | 
|---|
| 1573 |  | 
|---|
| 1574 | if ((s64)offset < 0) { | 
|---|
| 1575 | remain = (int)offset; | 
|---|
| 1576 | goto out; | 
|---|
| 1577 | } | 
|---|
| 1578 | /* | 
|---|
| 1579 | * Note that reporting an error now | 
|---|
| 1580 | * leaves everything in an inconsistent | 
|---|
| 1581 | * state as we have *already* changed | 
|---|
| 1582 | * the relocation value inside the | 
|---|
| 1583 | * object. As we have not changed the | 
|---|
| 1584 | * reloc.presumed_offset, nor will we | 
|---|
| 1585 | * change the execobject.offset, on the | 
|---|
| 1586 | * next call we may not rewrite the value | 
|---|
| 1587 | * inside the object, leaving it | 
|---|
| 1588 | * dangling and causing a GPU hang. Unless | 
|---|
| 1589 | * userspace dynamically rebuilds the | 
|---|
| 1590 | * relocations on each execbuf rather than | 
|---|
| 1591 | * presume a static tree. | 
|---|
| 1592 | * | 
|---|
| 1593 | * We did previously check if the relocations | 
|---|
| 1594 | * were writable (access_ok), an error now | 
|---|
| 1595 | * would be a strange race with mprotect, | 
|---|
| 1596 | * having already demonstrated that we | 
|---|
| 1597 | * can read from this userspace address. | 
|---|
| 1598 | */ | 
|---|
| 1599 | offset = gen8_canonical_addr(address: offset & ~UPDATE); | 
|---|
| 1600 | __put_user(offset, &urelocs[r - stack].presumed_offset); | 
|---|
| 1601 | } while (r++, --count); | 
|---|
| 1602 | urelocs += ARRAY_SIZE(stack); | 
|---|
| 1603 | } while (remain); | 
|---|
| 1604 | out: | 
|---|
| 1605 | reloc_cache_reset(cache: &eb->reloc_cache, eb); | 
|---|
| 1606 | return remain; | 
|---|
| 1607 | } | 
|---|
| 1608 |  | 
|---|
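The entries copied in chunks above are of type struct drm_i915_gem_relocation_entry from the drm uapi header. A hedged user-space sketch of how one entry might be filled in before submitting an execbuf, assuming the libdrm/uapi header is on the include path; the handle and offsets are placeholders:

```c
#include <string.h>
#include <drm/i915_drm.h>

/* Hypothetical handle/offsets, purely for illustration. */
static void fill_reloc(struct drm_i915_gem_relocation_entry *r)
{
	memset(r, 0, sizeof(*r));
	r->target_handle   = 2;		/* GEM handle of the target bo    */
	r->offset          = 0x40;	/* where to patch in this bo      */
	r->delta           = 0;		/* added to the target's address  */
	r->presumed_offset = -1ull;	/* unknown: force the kernel to relocate */
	r->read_domains    = I915_GEM_DOMAIN_RENDER;
	r->write_domain    = 0;
}
```

Marking presumed_offset as -1 matches the convention the kernel itself uses when it invalidates user relocations in eb_copy_relocations().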
| 1609 | static int | 
|---|
| 1610 | eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev) | 
|---|
| 1611 | { | 
|---|
| 1612 | const struct drm_i915_gem_exec_object2 *entry = ev->exec; | 
|---|
| 1613 | struct drm_i915_gem_relocation_entry *relocs = | 
|---|
| 1614 | u64_to_ptr(typeof(*relocs), entry->relocs_ptr); | 
|---|
| 1615 | unsigned int i; | 
|---|
| 1616 | int err; | 
|---|
| 1617 |  | 
|---|
| 1618 | for (i = 0; i < entry->relocation_count; i++) { | 
|---|
| 1619 | u64 offset = eb_relocate_entry(eb, ev, reloc: &relocs[i]); | 
|---|
| 1620 |  | 
|---|
| 1621 | if ((s64)offset < 0) { | 
|---|
| 1622 | err = (int)offset; | 
|---|
| 1623 | goto err; | 
|---|
| 1624 | } | 
|---|
| 1625 | } | 
|---|
| 1626 | err = 0; | 
|---|
| 1627 | err: | 
|---|
| 1628 | reloc_cache_reset(cache: &eb->reloc_cache, eb); | 
|---|
| 1629 | return err; | 
|---|
| 1630 | } | 
|---|
| 1631 |  | 
|---|
| 1632 | static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) | 
|---|
| 1633 | { | 
|---|
| 1634 | const char __user *addr, *end; | 
|---|
| 1635 | unsigned long size; | 
|---|
| 1636 | char __maybe_unused c; | 
|---|
| 1637 |  | 
|---|
| 1638 | size = entry->relocation_count; | 
|---|
| 1639 | if (size == 0) | 
|---|
| 1640 | return 0; | 
|---|
| 1641 |  | 
|---|
| 1642 | if (size > N_RELOC(INT_MAX)) | 
|---|
| 1643 | return -EINVAL; | 
|---|
| 1644 |  | 
|---|
| 1645 | addr = u64_to_user_ptr(entry->relocs_ptr); | 
|---|
| 1646 | size *= sizeof(struct drm_i915_gem_relocation_entry); | 
|---|
| 1647 | if (!access_ok(addr, size)) | 
|---|
| 1648 | return -EFAULT; | 
|---|
| 1649 |  | 
|---|
| 1650 | end = addr + size; | 
|---|
| 1651 | for (; addr < end; addr += PAGE_SIZE) { | 
|---|
| 1652 | int err = __get_user(c, addr); | 
|---|
| 1653 | if (err) | 
|---|
| 1654 | return err; | 
|---|
| 1655 | } | 
|---|
| 1656 | return __get_user(c, end - 1); | 
|---|
| 1657 | } | 
|---|
| 1658 |  | 
|---|
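check_relocations() pre-faults the user array by reading one byte per page plus the final byte, so the later pagefault-disabled atomic copy is unlikely to fault. A user-space analogue of that walk (the function name and MY_PAGE_SIZE are illustrative):

```c
#include <stddef.h>

#define MY_PAGE_SIZE 4096UL

/* Touch one byte in every page of [addr, addr + size), plus the last byte. */
static void prefault_range(const volatile char *addr, size_t size)
{
	const volatile char *end = addr + size;
	char c;

	for (; addr < end; addr += MY_PAGE_SIZE)
		c = *addr;
	c = end[-1];
	(void)c;
}
```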
| 1659 | static int eb_copy_relocations(const struct i915_execbuffer *eb) | 
|---|
| 1660 | { | 
|---|
| 1661 | struct drm_i915_gem_relocation_entry *relocs; | 
|---|
| 1662 | const unsigned int count = eb->buffer_count; | 
|---|
| 1663 | unsigned int i; | 
|---|
| 1664 | int err; | 
|---|
| 1665 |  | 
|---|
| 1666 | for (i = 0; i < count; i++) { | 
|---|
| 1667 | const unsigned int nreloc = eb->exec[i].relocation_count; | 
|---|
| 1668 | struct drm_i915_gem_relocation_entry __user *urelocs; | 
|---|
| 1669 | unsigned long size; | 
|---|
| 1670 | unsigned long copied; | 
|---|
| 1671 |  | 
|---|
| 1672 | if (nreloc == 0) | 
|---|
| 1673 | continue; | 
|---|
| 1674 |  | 
|---|
| 1675 | err = check_relocations(entry: &eb->exec[i]); | 
|---|
| 1676 | if (err) | 
|---|
| 1677 | goto err; | 
|---|
| 1678 |  | 
|---|
| 1679 | urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); | 
|---|
| 1680 | size = nreloc * sizeof(*relocs); | 
|---|
| 1681 |  | 
|---|
| 1682 | relocs = kvmalloc_array(1, size, GFP_KERNEL); | 
|---|
| 1683 | if (!relocs) { | 
|---|
| 1684 | err = -ENOMEM; | 
|---|
| 1685 | goto err; | 
|---|
| 1686 | } | 
|---|
| 1687 |  | 
|---|
| 1688 | /* copy_from_user is limited to < 4GiB */ | 
|---|
| 1689 | copied = 0; | 
|---|
| 1690 | do { | 
|---|
| 1691 | unsigned int len = | 
|---|
| 1692 | min_t(u64, BIT_ULL(31), size - copied); | 
|---|
| 1693 |  | 
|---|
| 1694 | if (__copy_from_user(to: (char *)relocs + copied, | 
|---|
| 1695 | from: (char __user *)urelocs + copied, | 
|---|
| 1696 | n: len)) | 
|---|
| 1697 | goto end; | 
|---|
| 1698 |  | 
|---|
| 1699 | copied += len; | 
|---|
| 1700 | } while (copied < size); | 
|---|
| 1701 |  | 
|---|
| 1702 | /* | 
|---|
| 1703 | * As we do not update the known relocation offsets after | 
|---|
| 1704 | * relocating (due to the complexities in lock handling), | 
|---|
| 1705 | * we need to mark them as invalid now so that we force the | 
|---|
| 1706 | * relocation processing next time. Just in case the target | 
|---|
| 1707 | * object is evicted and then rebound into its old | 
|---|
| 1708 | * presumed_offset before the next execbuffer - if that | 
|---|
| 1709 | * happened we would make the mistake of assuming that the | 
|---|
| 1710 | * relocations were valid. | 
|---|
| 1711 | */ | 
|---|
| 1712 | if (!user_access_begin(urelocs, size)) | 
|---|
| 1713 | goto end; | 
|---|
| 1714 |  | 
|---|
| 1715 | for (copied = 0; copied < nreloc; copied++) | 
|---|
| 1716 | unsafe_put_user(-1, | 
|---|
| 1717 | &urelocs[copied].presumed_offset, | 
|---|
| 1718 | end_user); | 
|---|
| 1719 | user_access_end(); | 
|---|
| 1720 |  | 
|---|
| 1721 | eb->exec[i].relocs_ptr = (uintptr_t)relocs; | 
|---|
| 1722 | } | 
|---|
| 1723 |  | 
|---|
| 1724 | return 0; | 
|---|
| 1725 |  | 
|---|
| 1726 | end_user: | 
|---|
| 1727 | user_access_end(); | 
|---|
| 1728 | end: | 
|---|
| 1729 | kvfree(addr: relocs); | 
|---|
| 1730 | err = -EFAULT; | 
|---|
| 1731 | err: | 
|---|
| 1732 | while (i--) { | 
|---|
| 1733 | relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr); | 
|---|
| 1734 | if (eb->exec[i].relocation_count) | 
|---|
| 1735 | kvfree(addr: relocs); | 
|---|
| 1736 | } | 
|---|
| 1737 | return err; | 
|---|
| 1738 | } | 
|---|
| 1739 |  | 
|---|
| 1740 | static int eb_prefault_relocations(const struct i915_execbuffer *eb) | 
|---|
| 1741 | { | 
|---|
| 1742 | const unsigned int count = eb->buffer_count; | 
|---|
| 1743 | unsigned int i; | 
|---|
| 1744 |  | 
|---|
| 1745 | for (i = 0; i < count; i++) { | 
|---|
| 1746 | int err; | 
|---|
| 1747 |  | 
|---|
| 1748 | err = check_relocations(entry: &eb->exec[i]); | 
|---|
| 1749 | if (err) | 
|---|
| 1750 | return err; | 
|---|
| 1751 | } | 
|---|
| 1752 |  | 
|---|
| 1753 | return 0; | 
|---|
| 1754 | } | 
|---|
| 1755 |  | 
|---|
| 1756 | static int eb_reinit_userptr(struct i915_execbuffer *eb) | 
|---|
| 1757 | { | 
|---|
| 1758 | const unsigned int count = eb->buffer_count; | 
|---|
| 1759 | unsigned int i; | 
|---|
| 1760 | int ret; | 
|---|
| 1761 |  | 
|---|
| 1762 | if (likely(!(eb->args->flags & __EXEC_USERPTR_USED))) | 
|---|
| 1763 | return 0; | 
|---|
| 1764 |  | 
|---|
| 1765 | for (i = 0; i < count; i++) { | 
|---|
| 1766 | struct eb_vma *ev = &eb->vma[i]; | 
|---|
| 1767 |  | 
|---|
| 1768 | if (!i915_gem_object_is_userptr(obj: ev->vma->obj)) | 
|---|
| 1769 | continue; | 
|---|
| 1770 |  | 
|---|
| 1771 | ret = i915_gem_object_userptr_submit_init(obj: ev->vma->obj); | 
|---|
| 1772 | if (ret) | 
|---|
| 1773 | return ret; | 
|---|
| 1774 |  | 
|---|
| 1775 | ev->flags |= __EXEC_OBJECT_USERPTR_INIT; | 
|---|
| 1776 | } | 
|---|
| 1777 |  | 
|---|
| 1778 | return 0; | 
|---|
| 1779 | } | 
|---|
| 1780 |  | 
|---|
| 1781 | static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb) | 
|---|
| 1782 | { | 
|---|
| 1783 | bool have_copy = false; | 
|---|
| 1784 | struct eb_vma *ev; | 
|---|
| 1785 | int err = 0; | 
|---|
| 1786 |  | 
|---|
| 1787 | repeat: | 
|---|
| 1788 | if (signal_pending(current)) { | 
|---|
| 1789 | err = -ERESTARTSYS; | 
|---|
| 1790 | goto out; | 
|---|
| 1791 | } | 
|---|
| 1792 |  | 
|---|
| 1793 | /* We may process another execbuffer during the unlock... */ | 
|---|
| 1794 | eb_release_vmas(eb, final: false); | 
|---|
| 1795 | i915_gem_ww_ctx_fini(ctx: &eb->ww); | 
|---|
| 1796 |  | 
|---|
| 1797 | /* | 
|---|
| 1798 | * We take 3 passes through the slowpath. | 
|---|
| 1799 | * | 
|---|
| 1800 | * 1 - we try to just prefault all the user relocation entries and | 
|---|
| 1801 | * then attempt to reuse the atomic pagefault disabled fast path again. | 
|---|
| 1802 | * | 
|---|
| 1803 | * 2 - we copy the user entries to a local buffer here outside of the | 
|---|
| 1804 | * lock and allow ourselves to wait upon any rendering before | 
|---|
| 1805 | * relocations | 
|---|
| 1806 | * | 
|---|
| 1807 | * 3 - we already have a local copy of the relocation entries, but | 
|---|
| 1808 | * were interrupted (EAGAIN) whilst waiting for the objects, try again. | 
|---|
| 1809 | */ | 
|---|
| 1810 | if (!err) { | 
|---|
| 1811 | err = eb_prefault_relocations(eb); | 
|---|
| 1812 | } else if (!have_copy) { | 
|---|
| 1813 | err = eb_copy_relocations(eb); | 
|---|
| 1814 | have_copy = err == 0; | 
|---|
| 1815 | } else { | 
|---|
| 1816 | cond_resched(); | 
|---|
| 1817 | err = 0; | 
|---|
| 1818 | } | 
|---|
| 1819 |  | 
|---|
| 1820 | if (!err) | 
|---|
| 1821 | err = eb_reinit_userptr(eb); | 
|---|
| 1822 |  | 
|---|
| 1823 | i915_gem_ww_ctx_init(ctx: &eb->ww, intr: true); | 
|---|
| 1824 | if (err) | 
|---|
| 1825 | goto out; | 
|---|
| 1826 |  | 
|---|
| 1827 | /* reacquire the objects */ | 
|---|
| 1828 | repeat_validate: | 
|---|
| 1829 | err = eb_pin_engine(eb, throttle: false); | 
|---|
| 1830 | if (err) | 
|---|
| 1831 | goto err; | 
|---|
| 1832 |  | 
|---|
| 1833 | err = eb_validate_vmas(eb); | 
|---|
| 1834 | if (err) | 
|---|
| 1835 | goto err; | 
|---|
| 1836 |  | 
|---|
| 1837 | GEM_BUG_ON(!eb->batches[0]); | 
|---|
| 1838 |  | 
|---|
| 1839 | list_for_each_entry(ev, &eb->relocs, reloc_link) { | 
|---|
| 1840 | if (!have_copy) { | 
|---|
| 1841 | err = eb_relocate_vma(eb, ev); | 
|---|
| 1842 | if (err) | 
|---|
| 1843 | break; | 
|---|
| 1844 | } else { | 
|---|
| 1845 | err = eb_relocate_vma_slow(eb, ev); | 
|---|
| 1846 | if (err) | 
|---|
| 1847 | break; | 
|---|
| 1848 | } | 
|---|
| 1849 | } | 
|---|
| 1850 |  | 
|---|
| 1851 | if (err == -EDEADLK) | 
|---|
| 1852 | goto err; | 
|---|
| 1853 |  | 
|---|
| 1854 | if (err && !have_copy) | 
|---|
| 1855 | goto repeat; | 
|---|
| 1856 |  | 
|---|
| 1857 | if (err) | 
|---|
| 1858 | goto err; | 
|---|
| 1859 |  | 
|---|
| 1860 | /* as last step, parse the command buffer */ | 
|---|
| 1861 | err = eb_parse(eb); | 
|---|
| 1862 | if (err) | 
|---|
| 1863 | goto err; | 
|---|
| 1864 |  | 
|---|
| 1865 | /* | 
|---|
| 1866 | * Leave the user relocations as they are, this is the painfully slow path, | 
|---|
| 1867 | * and we want to avoid the complication of dropping the lock whilst | 
|---|
| 1868 | * having buffers reserved in the aperture and so causing spurious | 
|---|
| 1869 | * ENOSPC for random operations. | 
|---|
| 1870 | */ | 
|---|
| 1871 |  | 
|---|
| 1872 | err: | 
|---|
| 1873 | if (err == -EDEADLK) { | 
|---|
| 1874 | eb_release_vmas(eb, final: false); | 
|---|
| 1875 | err = i915_gem_ww_ctx_backoff(ctx: &eb->ww); | 
|---|
| 1876 | if (!err) | 
|---|
| 1877 | goto repeat_validate; | 
|---|
| 1878 | } | 
|---|
| 1879 |  | 
|---|
| 1880 | if (err == -EAGAIN) | 
|---|
| 1881 | goto repeat; | 
|---|
| 1882 |  | 
|---|
| 1883 | out: | 
|---|
| 1884 | if (have_copy) { | 
|---|
| 1885 | const unsigned int count = eb->buffer_count; | 
|---|
| 1886 | unsigned int i; | 
|---|
| 1887 |  | 
|---|
| 1888 | for (i = 0; i < count; i++) { | 
|---|
| 1889 | const struct drm_i915_gem_exec_object2 *entry = | 
|---|
| 1890 | &eb->exec[i]; | 
|---|
| 1891 | struct drm_i915_gem_relocation_entry *relocs; | 
|---|
| 1892 |  | 
|---|
| 1893 | if (!entry->relocation_count) | 
|---|
| 1894 | continue; | 
|---|
| 1895 |  | 
|---|
| 1896 | relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr); | 
|---|
| 1897 | kvfree(addr: relocs); | 
|---|
| 1898 | } | 
|---|
| 1899 | } | 
|---|
| 1900 |  | 
|---|
| 1901 | return err; | 
|---|
| 1902 | } | 
|---|
| 1903 |  | 
|---|
| 1904 | static int eb_relocate_parse(struct i915_execbuffer *eb) | 
|---|
| 1905 | { | 
|---|
| 1906 | int err; | 
|---|
| 1907 | bool throttle = true; | 
|---|
| 1908 |  | 
|---|
| 1909 | retry: | 
|---|
| 1910 | err = eb_pin_engine(eb, throttle); | 
|---|
| 1911 | if (err) { | 
|---|
| 1912 | if (err != -EDEADLK) | 
|---|
| 1913 | return err; | 
|---|
| 1914 |  | 
|---|
| 1915 | goto err; | 
|---|
| 1916 | } | 
|---|
| 1917 |  | 
|---|
| 1918 | /* only throttle once, even if we didn't need to throttle */ | 
|---|
| 1919 | throttle = false; | 
|---|
| 1920 |  | 
|---|
| 1921 | err = eb_validate_vmas(eb); | 
|---|
| 1922 | if (err == -EAGAIN) | 
|---|
| 1923 | goto slow; | 
|---|
| 1924 | else if (err) | 
|---|
| 1925 | goto err; | 
|---|
| 1926 |  | 
|---|
| 1927 | /* The objects are in their final locations, apply the relocations. */ | 
|---|
| 1928 | if (eb->args->flags & __EXEC_HAS_RELOC) { | 
|---|
| 1929 | struct eb_vma *ev; | 
|---|
| 1930 |  | 
|---|
| 1931 | list_for_each_entry(ev, &eb->relocs, reloc_link) { | 
|---|
| 1932 | err = eb_relocate_vma(eb, ev); | 
|---|
| 1933 | if (err) | 
|---|
| 1934 | break; | 
|---|
| 1935 | } | 
|---|
| 1936 |  | 
|---|
| 1937 | if (err == -EDEADLK) | 
|---|
| 1938 | goto err; | 
|---|
| 1939 | else if (err) | 
|---|
| 1940 | goto slow; | 
|---|
| 1941 | } | 
|---|
| 1942 |  | 
|---|
| 1943 | if (!err) | 
|---|
| 1944 | err = eb_parse(eb); | 
|---|
| 1945 |  | 
|---|
| 1946 | err: | 
|---|
| 1947 | if (err == -EDEADLK) { | 
|---|
| 1948 | eb_release_vmas(eb, final: false); | 
|---|
| 1949 | err = i915_gem_ww_ctx_backoff(ctx: &eb->ww); | 
|---|
| 1950 | if (!err) | 
|---|
| 1951 | goto retry; | 
|---|
| 1952 | } | 
|---|
| 1953 |  | 
|---|
| 1954 | return err; | 
|---|
| 1955 |  | 
|---|
| 1956 | slow: | 
|---|
| 1957 | err = eb_relocate_parse_slow(eb); | 
|---|
| 1958 | if (err) | 
|---|
| 1959 | /* | 
|---|
| 1960 | * If the user expects the execobject.offset and | 
|---|
| 1961 | * reloc.presumed_offset to be an exact match, | 
|---|
| 1962 | * as for using NO_RELOC, then we cannot update | 
|---|
| 1963 | * the execobject.offset until we have completed | 
|---|
| 1964 | * relocation. | 
|---|
| 1965 | */ | 
|---|
| 1966 | eb->args->flags &= ~__EXEC_HAS_RELOC; | 
|---|
| 1967 |  | 
|---|
| 1968 | return err; | 
|---|
| 1969 | } | 
|---|
| 1970 |  | 
|---|
| 1971 | /* | 
|---|
| 1972 | * Use two helper loops to control the order in which requests / batches are created | 
|---|
| 1973 | * and added to the backend. Requests are created in order from the parent to | 
|---|
| 1974 | * the last child. Requests are added in the reverse order, from the last child | 
|---|
| 1975 | * to parent. This is done for locking reasons as the timeline lock is acquired | 
|---|
| 1976 | * during request creation and released when the request is added to the | 
|---|
| 1977 | * backend. To make lockdep happy (see intel_context_timeline_lock) this must be | 
|---|
| 1978 | * the ordering. | 
|---|
| 1979 | */ | 
|---|
| 1980 | #define for_each_batch_create_order(_eb, _i) \ | 
|---|
| 1981 | for ((_i) = 0; (_i) < (_eb)->num_batches; ++(_i)) | 
|---|
| 1982 | #define for_each_batch_add_order(_eb, _i) \ | 
|---|
| 1983 | BUILD_BUG_ON(!typecheck(int, _i)); \ | 
|---|
| 1984 | for ((_i) = (_eb)->num_batches - 1; (_i) >= 0; --(_i)) | 
|---|
| 1985 |  | 
|---|
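A quick illustration of the two iteration orders defined above, using a stand-alone stand-in for the execbuf struct (only num_batches matters here, and the kernel-only BUILD_BUG_ON/typecheck line of the add-order macro is omitted): creation walks 0 → num_batches-1, while adding walks num_batches-1 → 0, matching the locking rule described in the comment.

```c
#include <stdio.h>

struct fake_eb { int num_batches; };

#define for_each_batch_create_order(_eb, _i) \
	for ((_i) = 0; (_i) < (_eb)->num_batches; ++(_i))
#define for_each_batch_add_order(_eb, _i) \
	for ((_i) = (_eb)->num_batches - 1; (_i) >= 0; --(_i))

int main(void)
{
	struct fake_eb eb = { .num_batches = 3 };
	int i;

	for_each_batch_create_order(&eb, i)
		printf("create %d\n", i);	/* 0 1 2 */
	for_each_batch_add_order(&eb, i)
		printf("add    %d\n", i);	/* 2 1 0 */
	return 0;
}
```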
| 1986 | static struct i915_request * | 
|---|
| 1987 | eb_find_first_request_added(struct i915_execbuffer *eb) | 
|---|
| 1988 | { | 
|---|
| 1989 | int i; | 
|---|
| 1990 |  | 
|---|
| 1991 | for_each_batch_add_order(eb, i) | 
|---|
| 1992 | if (eb->requests[i]) | 
|---|
| 1993 | return eb->requests[i]; | 
|---|
| 1994 |  | 
|---|
| 1995 | GEM_BUG_ON( "Request not found"); | 
|---|
| 1996 |  | 
|---|
| 1997 | return NULL; | 
|---|
| 1998 | } | 
|---|
| 1999 |  | 
|---|
| 2000 | #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) | 
|---|
| 2001 |  | 
|---|
| 2002 | /* Stage with GFP_KERNEL allocations before we enter the signaling critical path */ | 
|---|
| 2003 | static int eb_capture_stage(struct i915_execbuffer *eb) | 
|---|
| 2004 | { | 
|---|
| 2005 | const unsigned int count = eb->buffer_count; | 
|---|
| 2006 | unsigned int i = count, j; | 
|---|
| 2007 |  | 
|---|
| 2008 | while (i--) { | 
|---|
| 2009 | struct eb_vma *ev = &eb->vma[i]; | 
|---|
| 2010 | struct i915_vma *vma = ev->vma; | 
|---|
| 2011 | unsigned int flags = ev->flags; | 
|---|
| 2012 |  | 
|---|
| 2013 | if (!(flags & EXEC_OBJECT_CAPTURE)) | 
|---|
| 2014 | continue; | 
|---|
| 2015 |  | 
|---|
| 2016 | if (i915_gem_context_is_recoverable(ctx: eb->gem_context) && | 
|---|
| 2017 | (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0))) | 
|---|
| 2018 | return -EINVAL; | 
|---|
| 2019 |  | 
|---|
| 2020 | for_each_batch_create_order(eb, j) { | 
|---|
| 2021 | struct i915_capture_list *capture; | 
|---|
| 2022 |  | 
|---|
| 2023 | capture = kmalloc(sizeof(*capture), GFP_KERNEL); | 
|---|
| 2024 | if (!capture) | 
|---|
| 2025 | continue; | 
|---|
| 2026 |  | 
|---|
| 2027 | capture->next = eb->capture_lists[j]; | 
|---|
| 2028 | capture->vma_res = i915_vma_resource_get(vma_res: vma->resource); | 
|---|
| 2029 | eb->capture_lists[j] = capture; | 
|---|
| 2030 | } | 
|---|
| 2031 | } | 
|---|
| 2032 |  | 
|---|
| 2033 | return 0; | 
|---|
| 2034 | } | 
|---|
| 2035 |  | 
|---|
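eb_capture_stage() builds each per-batch capture list by prepending: the new node points at the current head and then becomes the head, so the GFP_KERNEL allocations happen before the signaling critical section and the later commit is a cheap pointer hand-off. A generic sketch of that prepend under stand-in types (struct node and list_prepend are illustrative, not the driver's capture list API):

```c
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

/* Prepend a freshly allocated node; returns the new head (old head on OOM). */
static struct node *list_prepend(struct node *head, int payload)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return head;	/* mirror the driver's "skip on failure" policy */
	n->payload = payload;
	n->next = head;
	return n;
}
```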
| 2036 | /* Commit once we're in the critical path */ | 
|---|
| 2037 | static void eb_capture_commit(struct i915_execbuffer *eb) | 
|---|
| 2038 | { | 
|---|
| 2039 | unsigned int j; | 
|---|
| 2040 |  | 
|---|
| 2041 | for_each_batch_create_order(eb, j) { | 
|---|
| 2042 | struct i915_request *rq = eb->requests[j]; | 
|---|
| 2043 |  | 
|---|
| 2044 | if (!rq) | 
|---|
| 2045 | break; | 
|---|
| 2046 |  | 
|---|
| 2047 | rq->capture_list = eb->capture_lists[j]; | 
|---|
| 2048 | eb->capture_lists[j] = NULL; | 
|---|
| 2049 | } | 
|---|
| 2050 | } | 
|---|
| 2051 |  | 
|---|
| 2052 | /* | 
|---|
| 2053 | * Release anything that didn't get committed due to errors. | 
|---|
| 2054 | * The capture_list will otherwise be freed at request retire. | 
|---|
| 2055 | */ | 
|---|
| 2056 | static void eb_capture_release(struct i915_execbuffer *eb) | 
|---|
| 2057 | { | 
|---|
| 2058 | unsigned int j; | 
|---|
| 2059 |  | 
|---|
| 2060 | for_each_batch_create_order(eb, j) { | 
|---|
| 2061 | if (eb->capture_lists[j]) { | 
|---|
| 2062 | i915_request_free_capture_list(capture: eb->capture_lists[j]); | 
|---|
| 2063 | eb->capture_lists[j] = NULL; | 
|---|
| 2064 | } | 
|---|
| 2065 | } | 
|---|
| 2066 | } | 
|---|
| 2067 |  | 
|---|
| 2068 | static void eb_capture_list_clear(struct i915_execbuffer *eb) | 
|---|
| 2069 | { | 
|---|
| 2070 | memset(s: eb->capture_lists, c: 0, n: sizeof(eb->capture_lists)); | 
|---|
| 2071 | } | 
|---|
| 2072 |  | 
|---|
| 2073 | #else | 
|---|
| 2074 |  | 
|---|
| 2075 | static int eb_capture_stage(struct i915_execbuffer *eb) | 
|---|
| 2076 | { | 
|---|
| 2077 | return 0; | 
|---|
| 2078 | } | 
|---|
| 2079 |  | 
|---|
| 2080 | static void eb_capture_commit(struct i915_execbuffer *eb) | 
|---|
| 2081 | { | 
|---|
| 2082 | } | 
|---|
| 2083 |  | 
|---|
| 2084 | static void eb_capture_release(struct i915_execbuffer *eb) | 
|---|
| 2085 | { | 
|---|
| 2086 | } | 
|---|
| 2087 |  | 
|---|
| 2088 | static void eb_capture_list_clear(struct i915_execbuffer *eb) | 
|---|
| 2089 | { | 
|---|
| 2090 | } | 
|---|
| 2091 |  | 
|---|
| 2092 | #endif | 
|---|
| 2093 |  | 
|---|
| 2094 | static int eb_move_to_gpu(struct i915_execbuffer *eb) | 
|---|
| 2095 | { | 
|---|
| 2096 | const unsigned int count = eb->buffer_count; | 
|---|
| 2097 | unsigned int i = count; | 
|---|
| 2098 | int err = 0, j; | 
|---|
| 2099 |  | 
|---|
| 2100 | while (i--) { | 
|---|
| 2101 | struct eb_vma *ev = &eb->vma[i]; | 
|---|
| 2102 | struct i915_vma *vma = ev->vma; | 
|---|
| 2103 | unsigned int flags = ev->flags; | 
|---|
| 2104 | struct drm_i915_gem_object *obj = vma->obj; | 
|---|
| 2105 |  | 
|---|
| 2106 | assert_vma_held(vma); | 
|---|
| 2107 |  | 
|---|
| 2108 | /* | 
|---|
| 2109 | * If the GPU is not _reading_ through the CPU cache, we need | 
|---|
| 2110 | * to make sure that any writes (both previous GPU writes from | 
|---|
| 2111 | * before a change in snooping levels and normal CPU writes) | 
|---|
| 2112 | * caught in that cache are flushed to main memory. | 
|---|
| 2113 | * | 
|---|
| 2114 | * We want to say | 
|---|
| 2115 | *   obj->cache_dirty && | 
|---|
| 2116 | *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) | 
|---|
| 2117 | * but gcc's optimiser doesn't handle that as well and emits | 
|---|
| 2118 | * two jumps instead of one. Maybe one day... | 
|---|
| 2119 | * | 
|---|
| 2120 | * FIXME: There is also sync flushing in set_pages(), which | 
|---|
| 2121 | * serves a different purpose (some of the time at least). | 
|---|
| 2122 | * | 
|---|
| 2123 | * We should consider: | 
|---|
| 2124 | * | 
|---|
| 2125 | *   1. Rip out the async flush code. | 
|---|
| 2126 | * | 
|---|
| 2127 | *   2. Or make the sync flushing use the async clflush path | 
|---|
| 2128 | *   using mandatory fences underneath. Currently the below | 
|---|
| 2129 | *   async flush happens after we bind the object. | 
|---|
| 2130 | */ | 
|---|
| 2131 | if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) { | 
|---|
| 2132 | if (i915_gem_clflush_object(obj, flags: 0)) | 
|---|
| 2133 | flags &= ~EXEC_OBJECT_ASYNC; | 
|---|
| 2134 | } | 
|---|
| 2135 |  | 
|---|
| 2136 | /* We only need to await on the first request */ | 
|---|
| 2137 | if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) { | 
|---|
| 2138 | err = i915_request_await_object | 
|---|
| 2139 | (to: eb_find_first_request_added(eb), obj, | 
|---|
| 2140 | write: flags & EXEC_OBJECT_WRITE); | 
|---|
| 2141 | } | 
|---|
| 2142 |  | 
|---|
| 2143 | for_each_batch_add_order(eb, j) { | 
|---|
| 2144 | if (err) | 
|---|
| 2145 | break; | 
|---|
| 2146 | if (!eb->requests[j]) | 
|---|
| 2147 | continue; | 
|---|
| 2148 |  | 
|---|
| 2149 | err = _i915_vma_move_to_active(vma, rq: eb->requests[j], | 
|---|
| 2150 | fence: j ? NULL : | 
|---|
| 2151 | eb->composite_fence ? | 
|---|
| 2152 | eb->composite_fence : | 
|---|
| 2153 | &eb->requests[j]->fence, | 
|---|
| 2154 | flags: flags | __EXEC_OBJECT_NO_RESERVE | | 
|---|
| 2155 | __EXEC_OBJECT_NO_REQUEST_AWAIT); | 
|---|
| 2156 | } | 
|---|
| 2157 | } | 
|---|
| 2158 |  | 
|---|
| 2159 | #ifdef CONFIG_MMU_NOTIFIER | 
|---|
| 2160 | if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) { | 
|---|
| 2161 | for (i = 0; i < count; i++) { | 
|---|
| 2162 | struct eb_vma *ev = &eb->vma[i]; | 
|---|
| 2163 | struct drm_i915_gem_object *obj = ev->vma->obj; | 
|---|
| 2164 |  | 
|---|
| 2165 | if (!i915_gem_object_is_userptr(obj)) | 
|---|
| 2166 | continue; | 
|---|
| 2167 |  | 
|---|
| 2168 | err = i915_gem_object_userptr_submit_done(obj); | 
|---|
| 2169 | if (err) | 
|---|
| 2170 | break; | 
|---|
| 2171 | } | 
|---|
| 2172 | } | 
|---|
| 2173 | #endif | 
|---|
| 2174 |  | 
|---|
| 2175 | if (unlikely(err)) | 
|---|
| 2176 | goto err_skip; | 
|---|
| 2177 |  | 
|---|
| 2178 | /* Unconditionally flush any chipset caches (for streaming writes). */ | 
|---|
| 2179 | intel_gt_chipset_flush(gt: eb->gt); | 
|---|
| 2180 | eb_capture_commit(eb); | 
|---|
| 2181 |  | 
|---|
| 2182 | return 0; | 
|---|
| 2183 |  | 
|---|
| 2184 | err_skip: | 
|---|
| 2185 | for_each_batch_create_order(eb, j) { | 
|---|
| 2186 | if (!eb->requests[j]) | 
|---|
| 2187 | break; | 
|---|
| 2188 |  | 
|---|
| 2189 | i915_request_set_error_once(rq: eb->requests[j], error: err); | 
|---|
| 2190 | } | 
|---|
| 2191 | return err; | 
|---|
| 2192 | } | 
|---|
| 2193 |  | 
|---|
| 2194 | static int i915_gem_check_execbuffer(struct drm_i915_private *i915, | 
|---|
| 2195 | struct drm_i915_gem_execbuffer2 *exec) | 
|---|
| 2196 | { | 
|---|
| 2197 | if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS) | 
|---|
| 2198 | return -EINVAL; | 
|---|
| 2199 |  | 
|---|
| 2200 | /* Kernel clipping was a DRI1 misfeature */ | 
|---|
| 2201 | if (!(exec->flags & (I915_EXEC_FENCE_ARRAY | | 
|---|
| 2202 | I915_EXEC_USE_EXTENSIONS))) { | 
|---|
| 2203 | if (exec->num_cliprects || exec->cliprects_ptr) | 
|---|
| 2204 | return -EINVAL; | 
|---|
| 2205 | } | 
|---|
| 2206 |  | 
|---|
| 2207 | if (exec->DR4 == 0xffffffff) { | 
|---|
| 2208 | drm_dbg(&i915->drm, "UXA submitting garbage DR4, fixing up\n"); | 
|---|
| 2209 | exec->DR4 = 0; | 
|---|
| 2210 | } | 
|---|
| 2211 | if (exec->DR1 || exec->DR4) | 
|---|
| 2212 | return -EINVAL; | 
|---|
| 2213 |  | 
|---|
| 2214 | if ((exec->batch_start_offset | exec->batch_len) & 0x7) | 
|---|
| 2215 | return -EINVAL; | 
|---|
| 2216 |  | 
|---|
| 2217 | return 0; | 
|---|
| 2218 | } | 
|---|
| 2219 |  | 
|---|
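The final test above rejects a batch whose start offset or length is not 8-byte (qword) aligned; OR-ing the two values and masking with 0x7 checks both at once. A tiny demonstration (helper name is illustrative):

```c
#include <stdbool.h>
#include <stdint.h>

static bool batch_aligned(uint64_t batch_start_offset, uint64_t batch_len)
{
	return ((batch_start_offset | batch_len) & 0x7) == 0;
}

/*
 * batch_aligned(0x1000, 0x100) -> true  (both multiples of 8)
 * batch_aligned(0x1004, 0x100) -> false (offset misaligned)
 * batch_aligned(0x1000, 0x10c) -> false (length misaligned)
 */
```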
| 2220 | static int i915_reset_gen7_sol_offsets(struct i915_request *rq) | 
|---|
| 2221 | { | 
|---|
| 2222 | u32 *cs; | 
|---|
| 2223 | int i; | 
|---|
| 2224 |  | 
|---|
| 2225 | if (GRAPHICS_VER(rq->i915) != 7 || rq->engine->id != RCS0) { | 
|---|
| 2226 | drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n"); | 
|---|
| 2227 | return -EINVAL; | 
|---|
| 2228 | } | 
|---|
| 2229 |  | 
|---|
| 2230 | cs = intel_ring_begin(rq, num_dwords: 4 * 2 + 2); | 
|---|
| 2231 | if (IS_ERR(ptr: cs)) | 
|---|
| 2232 | return PTR_ERR(ptr: cs); | 
|---|
| 2233 |  | 
|---|
| 2234 | *cs++ = MI_LOAD_REGISTER_IMM(4); | 
|---|
| 2235 | for (i = 0; i < 4; i++) { | 
|---|
| 2236 | *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i)); | 
|---|
| 2237 | *cs++ = 0; | 
|---|
| 2238 | } | 
|---|
| 2239 | *cs++ = MI_NOOP; | 
|---|
| 2240 | intel_ring_advance(rq, cs); | 
|---|
| 2241 |  | 
|---|
| 2242 | return 0; | 
|---|
| 2243 | } | 
|---|
| 2244 |  | 
|---|
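The ring reservation above (4 * 2 + 2 dwords) covers one MI_LOAD_REGISTER_IMM header, four register/value pairs and a trailing MI_NOOP. A sketch of how such a payload is laid out in a plain dword array; the OP_* values are placeholders, not the real command encodings:

```c
#include <stdint.h>

#define OP_LRI_4_REGS	0x11000007u	/* placeholder header dword */
#define OP_NOOP		0x00000000u	/* placeholder no-op dword  */

/* Emit "write 0 to four registers" into cs; returns dwords written (10). */
static int emit_zero_four_regs(uint32_t *cs, const uint32_t reg_offsets[4])
{
	int n = 0, i;

	cs[n++] = OP_LRI_4_REGS;
	for (i = 0; i < 4; i++) {
		cs[n++] = reg_offsets[i];	/* register offset */
		cs[n++] = 0;			/* value           */
	}
	cs[n++] = OP_NOOP;
	return n;			/* 1 + 4*2 + 1 == 10 == 4*2 + 2 */
}
```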
| 2245 | static struct i915_vma * | 
|---|
| 2246 | shadow_batch_pin(struct i915_execbuffer *eb, | 
|---|
| 2247 | struct drm_i915_gem_object *obj, | 
|---|
| 2248 | struct i915_address_space *vm, | 
|---|
| 2249 | unsigned int flags) | 
|---|
| 2250 | { | 
|---|
| 2251 | struct i915_vma *vma; | 
|---|
| 2252 | int err; | 
|---|
| 2253 |  | 
|---|
| 2254 | vma = i915_vma_instance(obj, vm, NULL); | 
|---|
| 2255 | if (IS_ERR(ptr: vma)) | 
|---|
| 2256 | return vma; | 
|---|
| 2257 |  | 
|---|
| 2258 | err = i915_vma_pin_ww(vma, ww: &eb->ww, size: 0, alignment: 0, flags: flags | PIN_VALIDATE); | 
|---|
| 2259 | if (err) | 
|---|
| 2260 | return ERR_PTR(error: err); | 
|---|
| 2261 |  | 
|---|
| 2262 | return vma; | 
|---|
| 2263 | } | 
|---|
| 2264 |  | 
|---|
| 2265 | static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma) | 
|---|
| 2266 | { | 
|---|
| 2267 | /* | 
|---|
| 2268 | * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure | 
|---|
| 2269 | * batch" bit. Hence we need to pin secure batches into the global gtt. | 
|---|
| 2270 | * hsw should have this fixed, but bdw mucks it up again. */ | 
|---|
| 2271 | if (eb->batch_flags & I915_DISPATCH_SECURE) | 
|---|
| 2272 | return i915_gem_object_ggtt_pin_ww(obj: vma->obj, ww: &eb->ww, NULL, size: 0, alignment: 0, PIN_VALIDATE); | 
|---|
| 2273 |  | 
|---|
| 2274 | return NULL; | 
|---|
| 2275 | } | 
|---|
| 2276 |  | 
|---|
| 2277 | static int eb_parse(struct i915_execbuffer *eb) | 
|---|
| 2278 | { | 
|---|
| 2279 | struct drm_i915_private *i915 = eb->i915; | 
|---|
| 2280 | struct intel_gt_buffer_pool_node *pool = eb->batch_pool; | 
|---|
| 2281 | struct i915_vma *shadow, *trampoline, *batch; | 
|---|
| 2282 | unsigned long len; | 
|---|
| 2283 | int err; | 
|---|
| 2284 |  | 
|---|
| 2285 | if (!eb_use_cmdparser(eb)) { | 
|---|
| 2286 | batch = eb_dispatch_secure(eb, vma: eb->batches[0]->vma); | 
|---|
| 2287 | if (IS_ERR(ptr: batch)) | 
|---|
| 2288 | return PTR_ERR(ptr: batch); | 
|---|
| 2289 |  | 
|---|
| 2290 | goto secure_batch; | 
|---|
| 2291 | } | 
|---|
| 2292 |  | 
|---|
| 2293 | if (intel_context_is_parallel(ce: eb->context)) | 
|---|
| 2294 | return -EINVAL; | 
|---|
| 2295 |  | 
|---|
| 2296 | len = eb->batch_len[0]; | 
|---|
| 2297 | if (!CMDPARSER_USES_GGTT(eb->i915)) { | 
|---|
| 2298 | /* | 
|---|
| 2299 | * ppGTT backed shadow buffers must be mapped RO, to prevent | 
|---|
| 2300 | * post-scan tampering | 
|---|
| 2301 | */ | 
|---|
| 2302 | if (!eb->context->vm->has_read_only) { | 
|---|
| 2303 | drm_dbg(&i915->drm, | 
|---|
| 2304 | "Cannot prevent post-scan tampering without RO capable vm\n"); | 
|---|
| 2305 | return -EINVAL; | 
|---|
| 2306 | } | 
|---|
| 2307 | } else { | 
|---|
| 2308 | len += I915_CMD_PARSER_TRAMPOLINE_SIZE; | 
|---|
| 2309 | } | 
|---|
| 2310 | if (unlikely(len < eb->batch_len[0])) /* last paranoid check of overflow */ | 
|---|
| 2311 | return -EINVAL; | 
|---|
| 2312 |  | 
|---|
| 2313 | if (!pool) { | 
|---|
| 2314 | pool = intel_gt_get_buffer_pool(gt: eb->gt, size: len, | 
|---|
| 2315 | type: I915_MAP_WB); | 
|---|
| 2316 | if (IS_ERR(ptr: pool)) | 
|---|
| 2317 | return PTR_ERR(ptr: pool); | 
|---|
| 2318 | eb->batch_pool = pool; | 
|---|
| 2319 | } | 
|---|
| 2320 |  | 
|---|
| 2321 | err = i915_gem_object_lock(obj: pool->obj, ww: &eb->ww); | 
|---|
| 2322 | if (err) | 
|---|
| 2323 | return err; | 
|---|
| 2324 |  | 
|---|
| 2325 | shadow = shadow_batch_pin(eb, obj: pool->obj, vm: eb->context->vm, PIN_USER); | 
|---|
| 2326 | if (IS_ERR(ptr: shadow)) | 
|---|
| 2327 | return PTR_ERR(ptr: shadow); | 
|---|
| 2328 |  | 
|---|
| 2329 | intel_gt_buffer_pool_mark_used(node: pool); | 
|---|
| 2330 | i915_gem_object_set_readonly(obj: shadow->obj); | 
|---|
| 2331 | shadow->private = pool; | 
|---|
| 2332 |  | 
|---|
| 2333 | trampoline = NULL; | 
|---|
| 2334 | if (CMDPARSER_USES_GGTT(eb->i915)) { | 
|---|
| 2335 | trampoline = shadow; | 
|---|
| 2336 |  | 
|---|
| 2337 | shadow = shadow_batch_pin(eb, obj: pool->obj, | 
|---|
| 2338 | vm: &eb->gt->ggtt->vm, | 
|---|
| 2339 | PIN_GLOBAL); | 
|---|
| 2340 | if (IS_ERR(ptr: shadow)) | 
|---|
| 2341 | return PTR_ERR(ptr: shadow); | 
|---|
| 2342 |  | 
|---|
| 2343 | shadow->private = pool; | 
|---|
| 2344 |  | 
|---|
| 2345 | eb->batch_flags |= I915_DISPATCH_SECURE; | 
|---|
| 2346 | } | 
|---|
| 2347 |  | 
|---|
| 2348 | batch = eb_dispatch_secure(eb, vma: shadow); | 
|---|
| 2349 | if (IS_ERR(ptr: batch)) | 
|---|
| 2350 | return PTR_ERR(ptr: batch); | 
|---|
| 2351 |  | 
|---|
| 2352 | err = dma_resv_reserve_fences(obj: shadow->obj->base.resv, num_fences: 1); | 
|---|
| 2353 | if (err) | 
|---|
| 2354 | return err; | 
|---|
| 2355 |  | 
|---|
| 2356 | err = intel_engine_cmd_parser(engine: eb->context->engine, | 
|---|
| 2357 | batch: eb->batches[0]->vma, | 
|---|
| 2358 | batch_offset: eb->batch_start_offset, | 
|---|
| 2359 | batch_length: eb->batch_len[0], | 
|---|
| 2360 | shadow, trampoline); | 
|---|
| 2361 | if (err) | 
|---|
| 2362 | return err; | 
|---|
| 2363 |  | 
|---|
| 2364 | eb->batches[0] = &eb->vma[eb->buffer_count++]; | 
|---|
| 2365 | eb->batches[0]->vma = i915_vma_get(vma: shadow); | 
|---|
| 2366 | eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN; | 
|---|
| 2367 |  | 
|---|
| 2368 | eb->trampoline = trampoline; | 
|---|
| 2369 | eb->batch_start_offset = 0; | 
|---|
| 2370 |  | 
|---|
| 2371 | secure_batch: | 
|---|
| 2372 | if (batch) { | 
|---|
| 2373 | if (intel_context_is_parallel(ce: eb->context)) | 
|---|
| 2374 | return -EINVAL; | 
|---|
| 2375 |  | 
|---|
| 2376 | eb->batches[0] = &eb->vma[eb->buffer_count++]; | 
|---|
| 2377 | eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN; | 
|---|
| 2378 | eb->batches[0]->vma = i915_vma_get(vma: batch); | 
|---|
| 2379 | } | 
|---|
| 2380 | return 0; | 
|---|
| 2381 | } | 
|---|
| 2382 |  | 
|---|
| 2383 | static int eb_request_submit(struct i915_execbuffer *eb, | 
|---|
| 2384 | struct i915_request *rq, | 
|---|
| 2385 | struct i915_vma *batch, | 
|---|
| 2386 | u64 batch_len) | 
|---|
| 2387 | { | 
|---|
| 2388 | int err; | 
|---|
| 2389 |  | 
|---|
| 2390 | if (intel_context_nopreempt(ce: rq->context)) | 
|---|
| 2391 | __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); | 
|---|
| 2392 |  | 
|---|
| 2393 | if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) { | 
|---|
| 2394 | err = i915_reset_gen7_sol_offsets(rq); | 
|---|
| 2395 | if (err) | 
|---|
| 2396 | return err; | 
|---|
| 2397 | } | 
|---|
| 2398 |  | 
|---|
| 2399 | /* | 
|---|
| 2400 | * After we completed waiting for other engines (using HW semaphores) | 
|---|
| 2401 | * then we can signal that this request/batch is ready to run. This | 
|---|
| 2402 | * allows us to determine if the batch is still waiting on the GPU | 
|---|
| 2403 | * or actually running by checking the breadcrumb. | 
|---|
| 2404 | */ | 
|---|
| 2405 | if (rq->context->engine->emit_init_breadcrumb) { | 
|---|
| 2406 | err = rq->context->engine->emit_init_breadcrumb(rq); | 
|---|
| 2407 | if (err) | 
|---|
| 2408 | return err; | 
|---|
| 2409 | } | 
|---|
| 2410 |  | 
|---|
| 2411 | err = rq->context->engine->emit_bb_start(rq, | 
|---|
| 2412 | i915_vma_offset(vma: batch) + | 
|---|
| 2413 | eb->batch_start_offset, | 
|---|
| 2414 | batch_len, | 
|---|
| 2415 | eb->batch_flags); | 
|---|
| 2416 | if (err) | 
|---|
| 2417 | return err; | 
|---|
| 2418 |  | 
|---|
| 2419 | if (eb->trampoline) { | 
|---|
| 2420 | GEM_BUG_ON(intel_context_is_parallel(rq->context)); | 
|---|
| 2421 | GEM_BUG_ON(eb->batch_start_offset); | 
|---|
| 2422 | err = rq->context->engine->emit_bb_start(rq, | 
|---|
| 2423 | i915_vma_offset(vma: eb->trampoline) + | 
|---|
| 2424 | batch_len, 0, 0); | 
|---|
| 2425 | if (err) | 
|---|
| 2426 | return err; | 
|---|
| 2427 | } | 
|---|
| 2428 |  | 
|---|
| 2429 | return 0; | 
|---|
| 2430 | } | 
|---|
| 2431 |  | 
|---|
| 2432 | static int eb_submit(struct i915_execbuffer *eb) | 
|---|
| 2433 | { | 
|---|
| 2434 | unsigned int i; | 
|---|
| 2435 | int err; | 
|---|
| 2436 |  | 
|---|
| 2437 | err = eb_move_to_gpu(eb); | 
|---|
| 2438 |  | 
|---|
| 2439 | for_each_batch_create_order(eb, i) { | 
|---|
| 2440 | if (!eb->requests[i]) | 
|---|
| 2441 | break; | 
|---|
| 2442 |  | 
|---|
| 2443 | trace_i915_request_queue(rq: eb->requests[i], flags: eb->batch_flags); | 
|---|
| 2444 | if (!err) | 
|---|
| 2445 | err = eb_request_submit(eb, rq: eb->requests[i], | 
|---|
| 2446 | batch: eb->batches[i]->vma, | 
|---|
| 2447 | batch_len: eb->batch_len[i]); | 
|---|
| 2448 | } | 
|---|
| 2449 |  | 
|---|
| 2450 | return err; | 
|---|
| 2451 | } | 
|---|
| 2452 |  | 
|---|
| 2453 | /* | 
|---|
| 2454 | * Find one BSD ring to dispatch the corresponding BSD command. | 
|---|
| 2455 | * The engine index is returned. | 
|---|
| 2456 | */ | 
|---|
| 2457 | static unsigned int | 
|---|
| 2458 | gen8_dispatch_bsd_engine(struct drm_i915_private *i915, | 
|---|
| 2459 | struct drm_file *file) | 
|---|
| 2460 | { | 
|---|
| 2461 | struct drm_i915_file_private *file_priv = file->driver_priv; | 
|---|
| 2462 |  | 
|---|
| 2463 | /* Check whether the file_priv has already selected one ring. */ | 
|---|
| 2464 | if ((int)file_priv->bsd_engine < 0) | 
|---|
| 2465 | file_priv->bsd_engine = | 
|---|
| 2466 | get_random_u32_below(ceil: i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]); | 
|---|
| 2467 |  | 
|---|
| 2468 | return file_priv->bsd_engine; | 
|---|
| 2469 | } | 
|---|
| 2470 |  | 
|---|
| 2471 | static const enum intel_engine_id user_ring_map[] = { | 
|---|
| 2472 | [I915_EXEC_DEFAULT]	= RCS0, | 
|---|
| 2473 | [I915_EXEC_RENDER]	= RCS0, | 
|---|
| 2474 | [I915_EXEC_BLT]		= BCS0, | 
|---|
| 2475 | [I915_EXEC_BSD]		= VCS0, | 
|---|
| 2476 | [I915_EXEC_VEBOX]	= VECS0 | 
|---|
| 2477 | }; | 
|---|
| 2478 |  | 
|---|
| 2479 | static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce) | 
|---|
| 2480 | { | 
|---|
| 2481 | struct intel_ring *ring = ce->ring; | 
|---|
| 2482 | struct intel_timeline *tl = ce->timeline; | 
|---|
| 2483 | struct i915_request *rq; | 
|---|
| 2484 |  | 
|---|
| 2485 | /* | 
|---|
| 2486 | * Completely unscientific finger-in-the-air estimates for suitable | 
|---|
| 2487 | * maximum user request size (to avoid blocking) and then backoff. | 
|---|
| 2488 | */ | 
|---|
| 2489 | if (intel_ring_update_space(ring) >= PAGE_SIZE) | 
|---|
| 2490 | return NULL; | 
|---|
| 2491 |  | 
|---|
| 2492 | /* | 
|---|
| 2493 | * Find a request that after waiting upon, there will be at least half | 
|---|
| 2494 | * the ring available. The hysteresis allows us to compete for the | 
|---|
| 2495 | * shared ring and should mean that we sleep less often prior to | 
|---|
| 2496 | * claiming our resources, but not so long that the ring completely | 
|---|
| 2497 | * drains before we can submit our next request. | 
|---|
| 2498 | */ | 
|---|
| 2499 | list_for_each_entry(rq, &tl->requests, link) { | 
|---|
| 2500 | if (rq->ring != ring) | 
|---|
| 2501 | continue; | 
|---|
| 2502 |  | 
|---|
| 2503 | if (__intel_ring_space(head: rq->postfix, | 
|---|
| 2504 | tail: ring->emit, size: ring->size) > ring->size / 2) | 
|---|
| 2505 | break; | 
|---|
| 2506 | } | 
|---|
| 2507 | if (&rq->link == &tl->requests) | 
|---|
| 2508 | return NULL; /* weird, we will check again later for real */ | 
|---|
| 2509 |  | 
|---|
| 2510 | return i915_request_get(rq); | 
|---|
| 2511 | } | 
|---|
| 2512 |  | 
|---|
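eb_throttle() looks for a request whose completion would leave more than half the ring free, so we do not block repeatedly for small amounts of space. Assuming a standard power-of-two circular buffer, the check looks roughly like the sketch below; this is a generic stand-in, not the exact definition of __intel_ring_space():

```c
#include <stdbool.h>
#include <stdint.h>

/* Free space in a circular buffer of `size` bytes (one byte kept unused). */
static uint32_t ring_space(uint32_t head, uint32_t tail, uint32_t size)
{
	return (head - tail - 1) & (size - 1);
}

/* Throttle target: waiting for this request should free over half the ring. */
static bool frees_enough(uint32_t req_postfix, uint32_t emit, uint32_t size)
{
	return ring_space(req_postfix, emit, size) > size / 2;
}
```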
| 2513 | static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce, | 
|---|
| 2514 | bool throttle) | 
|---|
| 2515 | { | 
|---|
| 2516 | struct intel_timeline *tl; | 
|---|
| 2517 | struct i915_request *rq = NULL; | 
|---|
| 2518 |  | 
|---|
| 2519 | /* | 
|---|
| 2520 | * Take a local wakeref for preparing to dispatch the execbuf as | 
|---|
| 2521 | * we expect to access the hardware fairly frequently in the | 
|---|
| 2522 | * process, and require the engine to be kept awake between accesses. | 
|---|
| 2523 | * Upon dispatch, we acquire another prolonged wakeref that we hold | 
|---|
| 2524 | * until the timeline is idle, which in turn releases the wakeref | 
|---|
| 2525 | * taken on the engine, and the parent device. | 
|---|
| 2526 | */ | 
|---|
| 2527 | tl = intel_context_timeline_lock(ce); | 
|---|
| 2528 | if (IS_ERR(ptr: tl)) | 
|---|
| 2529 | return PTR_ERR(ptr: tl); | 
|---|
| 2530 |  | 
|---|
| 2531 | intel_context_enter(ce); | 
|---|
| 2532 | if (throttle) | 
|---|
| 2533 | rq = eb_throttle(eb, ce); | 
|---|
| 2534 | intel_context_timeline_unlock(tl); | 
|---|
| 2535 |  | 
|---|
| 2536 | if (rq) { | 
|---|
| 2537 | bool nonblock = eb->file->filp->f_flags & O_NONBLOCK; | 
|---|
| 2538 | long timeout = nonblock ? 0 : MAX_SCHEDULE_TIMEOUT; | 
|---|
| 2539 |  | 
|---|
| 2540 | if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, | 
|---|
| 2541 | timeout) < 0) { | 
|---|
| 2542 | i915_request_put(rq); | 
|---|
| 2543 |  | 
|---|
| 2544 | /* | 
|---|
| 2545 | * Error path, cannot use intel_context_timeline_lock as | 
|---|
| 2546 | * that is user interruptible and this clean up step | 
|---|
| 2547 | * must be done. | 
|---|
| 2548 | */ | 
|---|
| 2549 | mutex_lock(lock: &ce->timeline->mutex); | 
|---|
| 2550 | intel_context_exit(ce); | 
|---|
| 2551 | mutex_unlock(lock: &ce->timeline->mutex); | 
|---|
| 2552 |  | 
|---|
| 2553 | if (nonblock) | 
|---|
| 2554 | return -EWOULDBLOCK; | 
|---|
| 2555 | else | 
|---|
| 2556 | return -EINTR; | 
|---|
| 2557 | } | 
|---|
| 2558 | i915_request_put(rq); | 
|---|
| 2559 | } | 
|---|
| 2560 |  | 
|---|
| 2561 | return 0; | 
|---|
| 2562 | } | 
|---|
| 2563 |  | 
|---|
| 2564 | static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle) | 
|---|
| 2565 | { | 
|---|
| 2566 | struct intel_context *ce = eb->context, *child; | 
|---|
| 2567 | int err; | 
|---|
| 2568 | int i = 0, j = 0; | 
|---|
| 2569 |  | 
|---|
| 2570 | GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED); | 
|---|
| 2571 |  | 
|---|
| 2572 | if (unlikely(intel_context_is_banned(ce))) | 
|---|
| 2573 | return -EIO; | 
|---|
| 2574 |  | 
|---|
| 2575 | /* | 
|---|
| 2576 | * Pinning the contexts may generate requests in order to acquire | 
|---|
| 2577 | * GGTT space, so do this first before we reserve a seqno for | 
|---|
| 2578 | * ourselves. | 
|---|
| 2579 | */ | 
|---|
| 2580 | err = intel_context_pin_ww(ce, ww: &eb->ww); | 
|---|
| 2581 | if (err) | 
|---|
| 2582 | return err; | 
|---|
| 2583 | for_each_child(ce, child) { | 
|---|
| 2584 | err = intel_context_pin_ww(ce: child, ww: &eb->ww); | 
|---|
| 2585 | GEM_BUG_ON(err);	/* perma-pinned should incr a counter */ | 
|---|
| 2586 | } | 
|---|
| 2587 |  | 
|---|
| 2588 | for_each_child(ce, child) { | 
|---|
| 2589 | err = eb_pin_timeline(eb, ce: child, throttle); | 
|---|
| 2590 | if (err) | 
|---|
| 2591 | goto unwind; | 
|---|
| 2592 | ++i; | 
|---|
| 2593 | } | 
|---|
| 2594 | err = eb_pin_timeline(eb, ce, throttle); | 
|---|
| 2595 | if (err) | 
|---|
| 2596 | goto unwind; | 
|---|
| 2597 |  | 
|---|
| 2598 | eb->args->flags |= __EXEC_ENGINE_PINNED; | 
|---|
| 2599 | return 0; | 
|---|
| 2600 |  | 
|---|
| 2601 | unwind: | 
|---|
| 2602 | for_each_child(ce, child) { | 
|---|
| 2603 | if (j++ < i) { | 
|---|
| 2604 | mutex_lock(lock: &child->timeline->mutex); | 
|---|
| 2605 | intel_context_exit(ce: child); | 
|---|
| 2606 | mutex_unlock(lock: &child->timeline->mutex); | 
|---|
| 2607 | } | 
|---|
| 2608 | } | 
|---|
| 2609 | for_each_child(ce, child) | 
|---|
| 2610 | intel_context_unpin(ce: child); | 
|---|
| 2611 | intel_context_unpin(ce); | 
|---|
| 2612 | return err; | 
|---|
| 2613 | } | 
|---|
| 2614 |  | 
|---|
| 2615 | static void eb_unpin_engine(struct i915_execbuffer *eb) | 
|---|
| 2616 | { | 
|---|
| 2617 | struct intel_context *ce = eb->context, *child; | 
|---|
| 2618 |  | 
|---|
| 2619 | if (!(eb->args->flags & __EXEC_ENGINE_PINNED)) | 
|---|
| 2620 | return; | 
|---|
| 2621 |  | 
|---|
| 2622 | eb->args->flags &= ~__EXEC_ENGINE_PINNED; | 
|---|
| 2623 |  | 
|---|
| 2624 | for_each_child(ce, child) { | 
|---|
| 2625 | mutex_lock(&child->timeline->mutex); |
|---|
| 2626 | intel_context_exit(child); |
|---|
| 2627 | mutex_unlock(&child->timeline->mutex); |
|---|
| 2628 |  |
|---|
| 2629 | intel_context_unpin(child); |
|---|
| 2630 | } | 
|---|
| 2631 |  | 
|---|
| 2632 | mutex_lock(&ce->timeline->mutex); |
|---|
| 2633 | intel_context_exit(ce); |
|---|
| 2634 | mutex_unlock(&ce->timeline->mutex); |
|---|
| 2635 |  | 
|---|
| 2636 | intel_context_unpin(ce); | 
|---|
| 2637 | } | 
|---|
| 2638 |  | 
|---|
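| | /* |
|---|
| | * Map the legacy I915_EXEC_RING_MASK selector (and, on parts with more |
|---|
| | * than one video engine, the I915_EXEC_BSD_RING1/RING2 dispatch hint) |
|---|
| | * onto an index into user_ring_map[]. Returns -1 for unknown selectors. |
|---|
| | */ |
|---|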
| 2639 | static unsigned int | 
|---|
| 2640 | eb_select_legacy_ring(struct i915_execbuffer *eb) | 
|---|
| 2641 | { | 
|---|
| 2642 | struct drm_i915_private *i915 = eb->i915; | 
|---|
| 2643 | struct drm_i915_gem_execbuffer2 *args = eb->args; | 
|---|
| 2644 | unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK; | 
|---|
| 2645 |  | 
|---|
| 2646 | if (user_ring_id != I915_EXEC_BSD && | 
|---|
| 2647 | (args->flags & I915_EXEC_BSD_MASK)) { | 
|---|
| 2648 | drm_dbg(&i915->drm, | 
|---|
| 2649 | "execbuf with non bsd ring but with invalid " | 
|---|
| 2650 | "bsd dispatch flags: %d\n", (int)(args->flags)); | 
|---|
| 2651 | return -1; | 
|---|
| 2652 | } | 
|---|
| 2653 |  | 
|---|
| 2654 | if (user_ring_id == I915_EXEC_BSD && | 
|---|
| 2655 | i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO] > 1) { | 
|---|
| 2656 | unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; | 
|---|
| 2657 |  | 
|---|
| 2658 | if (bsd_idx == I915_EXEC_BSD_DEFAULT) { | 
|---|
| 2659 | bsd_idx = gen8_dispatch_bsd_engine(i915, eb->file); |
|---|
| 2660 | } else if (bsd_idx >= I915_EXEC_BSD_RING1 && | 
|---|
| 2661 | bsd_idx <= I915_EXEC_BSD_RING2) { | 
|---|
| 2662 | bsd_idx >>= I915_EXEC_BSD_SHIFT; | 
|---|
| 2663 | bsd_idx--; | 
|---|
| 2664 | } else { | 
|---|
| 2665 | drm_dbg(&i915->drm, | 
|---|
| 2666 | "execbuf with unknown bsd ring: %u\n", | 
|---|
| 2667 | bsd_idx); | 
|---|
| 2668 | return -1; | 
|---|
| 2669 | } | 
|---|
| 2670 |  | 
|---|
| 2671 | return _VCS(bsd_idx); | 
|---|
| 2672 | } | 
|---|
| 2673 |  | 
|---|
| 2674 | if (user_ring_id >= ARRAY_SIZE(user_ring_map)) { | 
|---|
| 2675 | drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n", | 
|---|
| 2676 | user_ring_id); | 
|---|
| 2677 | return -1; | 
|---|
| 2678 | } | 
|---|
| 2679 |  | 
|---|
| 2680 | return user_ring_map[user_ring_id]; | 
|---|
| 2681 | } | 
|---|
| 2682 |  | 
|---|
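| | /* |
|---|
| | * Resolve the context/engine selected by the execbuf flags, take the GT |
|---|
| | * wakeref(s) needed while VMAs are validated, allocate any lazily created |
|---|
| | * context state and grab a reference to the VM. Everything acquired here |
|---|
| | * is released again by eb_put_engine(). |
|---|
| | */ |
|---|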
| 2683 | static int | 
|---|
| 2684 | eb_select_engine(struct i915_execbuffer *eb) | 
|---|
| 2685 | { | 
|---|
| 2686 | struct intel_context *ce, *child; | 
|---|
| 2687 | struct intel_gt *gt; | 
|---|
| 2688 | unsigned int idx; | 
|---|
| 2689 | int err; | 
|---|
| 2690 |  | 
|---|
| 2691 | if (i915_gem_context_user_engines(eb->gem_context)) |
|---|
| 2692 | idx = eb->args->flags & I915_EXEC_RING_MASK; | 
|---|
| 2693 | else | 
|---|
| 2694 | idx = eb_select_legacy_ring(eb); | 
|---|
| 2695 |  | 
|---|
| 2696 | ce = i915_gem_context_get_engine(eb->gem_context, idx); |
|---|
| 2697 | if (IS_ERR(ce)) |
|---|
| 2698 | return PTR_ERR(ce); |
|---|
| 2699 |  | 
|---|
| 2700 | if (intel_context_is_parallel(ce)) { | 
|---|
| 2701 | if (eb->buffer_count < ce->parallel.number_children + 1) { | 
|---|
| 2702 | intel_context_put(ce); | 
|---|
| 2703 | return -EINVAL; | 
|---|
| 2704 | } | 
|---|
| 2705 | if (eb->batch_start_offset || eb->args->batch_len) { | 
|---|
| 2706 | intel_context_put(ce); | 
|---|
| 2707 | return -EINVAL; | 
|---|
| 2708 | } | 
|---|
| 2709 | } | 
|---|
| 2710 | eb->num_batches = ce->parallel.number_children + 1; | 
|---|
| 2711 | gt = ce->engine->gt; | 
|---|
| 2712 |  | 
|---|
| 2713 | for_each_child(ce, child) | 
|---|
| 2714 | intel_context_get(child); |
|---|
| 2715 | eb->wakeref = intel_gt_pm_get(ce->engine->gt); |
|---|
| 2716 | /* | 
|---|
| 2717 | * Keep GT0 active on MTL so that i915_vma_parked() doesn't | 
|---|
| 2718 | * free VMAs while execbuf ioctl is validating VMAs. | 
|---|
| 2719 | */ | 
|---|
| 2720 | if (gt->info.id) | 
|---|
| 2721 | eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915)); |
|---|
| 2722 |  | 
|---|
| 2723 | if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { | 
|---|
| 2724 | err = intel_context_alloc_state(ce); | 
|---|
| 2725 | if (err) | 
|---|
| 2726 | goto err; | 
|---|
| 2727 | } | 
|---|
| 2728 | for_each_child(ce, child) { | 
|---|
| 2729 | if (!test_bit(CONTEXT_ALLOC_BIT, &child->flags)) { | 
|---|
| 2730 | err = intel_context_alloc_state(child); |
|---|
| 2731 | if (err) | 
|---|
| 2732 | goto err; | 
|---|
| 2733 | } | 
|---|
| 2734 | } | 
|---|
| 2735 |  | 
|---|
| 2736 | /* | 
|---|
| 2737 | * ABI: Before userspace accesses the GPU (e.g. execbuffer), report | 
|---|
| 2738 | * EIO if the GPU is already wedged. | 
|---|
| 2739 | */ | 
|---|
| 2740 | err = intel_gt_terminally_wedged(ce->engine->gt); |
|---|
| 2741 | if (err) | 
|---|
| 2742 | goto err; | 
|---|
| 2743 |  | 
|---|
| 2744 | if (!i915_vm_tryget(ce->vm)) { |
|---|
| 2745 | err = -ENOENT; | 
|---|
| 2746 | goto err; | 
|---|
| 2747 | } | 
|---|
| 2748 |  | 
|---|
| 2749 | eb->context = ce; | 
|---|
| 2750 | eb->gt = ce->engine->gt; | 
|---|
| 2751 |  | 
|---|
| 2752 | /* | 
|---|
| 2753 | * Make sure engine pool stays alive even if we call intel_context_put | 
|---|
| 2754 | * during ww handling. The pool is destroyed when last pm reference | 
|---|
| 2755 | * is dropped, which breaks our -EDEADLK handling. | 
|---|
| 2756 | */ | 
|---|
| 2757 | return err; | 
|---|
| 2758 |  | 
|---|
| 2759 | err: | 
|---|
| 2760 | if (gt->info.id) | 
|---|
| 2761 | intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0); |
|---|
| 2762 |  |
|---|
| 2763 | intel_gt_pm_put(ce->engine->gt, eb->wakeref); |
|---|
| 2764 | for_each_child(ce, child) |
|---|
| 2765 | intel_context_put(child); |
|---|
| 2766 | intel_context_put(ce); | 
|---|
| 2767 | return err; | 
|---|
| 2768 | } | 
|---|
| 2769 |  | 
|---|
| 2770 | static void | 
|---|
| 2771 | eb_put_engine(struct i915_execbuffer *eb) | 
|---|
| 2772 | { | 
|---|
| 2773 | struct intel_context *child; | 
|---|
| 2774 |  | 
|---|
| 2775 | i915_vm_put(eb->context->vm); |
|---|
| 2776 | /* | 
|---|
| 2777 | * This works in conjunction with eb_select_engine() to prevent | 
|---|
| 2778 | * i915_vma_parked() from interfering while execbuf validates vmas. | 
|---|
| 2779 | */ | 
|---|
| 2780 | if (eb->gt->info.id) | 
|---|
| 2781 | intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0); |
|---|
| 2782 | intel_gt_pm_put(eb->context->engine->gt, eb->wakeref); |
|---|
| 2783 | for_each_child(eb->context, child) |
|---|
| 2784 | intel_context_put(child); |
|---|
| 2785 | intel_context_put(eb->context); |
|---|
| 2786 | } | 
|---|
| 2787 |  | 
|---|
| 2788 | static void | 
|---|
| 2789 | __free_fence_array(struct eb_fence *fences, unsigned int n) | 
|---|
| 2790 | { | 
|---|
| 2791 | while (n--) { | 
|---|
| 2792 | drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2)); | 
|---|
| 2793 | dma_fence_put(fences[n].dma_fence); |
|---|
| 2794 | dma_fence_chain_free(fences[n].chain_fence); |
|---|
| 2795 | } |
|---|
| 2796 | kvfree(fences); |
|---|
| 2797 | } | 
|---|
| 2798 |  | 
|---|
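| | /* |
|---|
| | * Import the DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES extension: for |
|---|
| | * each (handle, value) pair taken from handles_ptr/values_ptr, look up the |
|---|
| | * syncobj and record a wait and/or signal on the given timeline point. |
|---|
| | * As a rough userspace sketch (local variable names are illustrative only): |
|---|
| | * |
|---|
| | *	struct drm_i915_gem_exec_fence fence = { .handle = syncobj_handle, |
|---|
| | *						 .flags = I915_EXEC_FENCE_SIGNAL }; |
|---|
| | *	u64 point = 2; |
|---|
| | *	ext.fence_count = 1; |
|---|
| | *	ext.handles_ptr = (uintptr_t)&fence; |
|---|
| | *	ext.values_ptr = (uintptr_t)&point; |
|---|
| | */ |
|---|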
| 2799 | static int | 
|---|
| 2800 | add_timeline_fence_array(struct i915_execbuffer *eb, | 
|---|
| 2801 | const struct drm_i915_gem_execbuffer_ext_timeline_fences *timeline_fences) | 
|---|
| 2802 | { | 
|---|
| 2803 | struct drm_i915_gem_exec_fence __user *user_fences; | 
|---|
| 2804 | u64 __user *user_values; | 
|---|
| 2805 | struct eb_fence *f; | 
|---|
| 2806 | u64 nfences; | 
|---|
| 2807 | int err = 0; | 
|---|
| 2808 |  | 
|---|
| 2809 | nfences = timeline_fences->fence_count; | 
|---|
| 2810 | if (!nfences) | 
|---|
| 2811 | return 0; | 
|---|
| 2812 |  | 
|---|
| 2813 | /* Check multiplication overflow for access_ok() and kvmalloc_array() */ | 
|---|
| 2814 | BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); | 
|---|
| 2815 | if (nfences > min_t(unsigned long, | 
|---|
| 2816 | ULONG_MAX / sizeof(*user_fences), | 
|---|
| 2817 | SIZE_MAX / sizeof(*f)) - eb->num_fences) | 
|---|
| 2818 | return -EINVAL; | 
|---|
| 2819 |  | 
|---|
| 2820 | user_fences = u64_to_user_ptr(timeline_fences->handles_ptr); | 
|---|
| 2821 | if (!access_ok(user_fences, nfences * sizeof(*user_fences))) | 
|---|
| 2822 | return -EFAULT; | 
|---|
| 2823 |  | 
|---|
| 2824 | user_values = u64_to_user_ptr(timeline_fences->values_ptr); | 
|---|
| 2825 | if (!access_ok(user_values, nfences * sizeof(*user_values))) | 
|---|
| 2826 | return -EFAULT; | 
|---|
| 2827 |  | 
|---|
| 2828 | f = krealloc(eb->fences, | 
|---|
| 2829 | (eb->num_fences + nfences) * sizeof(*f), | 
|---|
| 2830 | __GFP_NOWARN | GFP_KERNEL); | 
|---|
| 2831 | if (!f) | 
|---|
| 2832 | return -ENOMEM; | 
|---|
| 2833 |  | 
|---|
| 2834 | eb->fences = f; | 
|---|
| 2835 | f += eb->num_fences; | 
|---|
| 2836 |  | 
|---|
| 2837 | BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & | 
|---|
| 2838 | ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); | 
|---|
| 2839 |  | 
|---|
| 2840 | while (nfences--) { | 
|---|
| 2841 | struct drm_i915_gem_exec_fence user_fence; | 
|---|
| 2842 | struct drm_syncobj *syncobj; | 
|---|
| 2843 | struct dma_fence *fence = NULL; | 
|---|
| 2844 | u64 point; | 
|---|
| 2845 |  | 
|---|
| 2846 | if (__copy_from_user(&user_fence, |
|---|
| 2847 | user_fences++, |
|---|
| 2848 | sizeof(user_fence))) |
|---|
| 2849 | return -EFAULT; | 
|---|
| 2850 |  | 
|---|
| 2851 | if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) | 
|---|
| 2852 | return -EINVAL; | 
|---|
| 2853 |  | 
|---|
| 2854 | if (__get_user(point, user_values++)) | 
|---|
| 2855 | return -EFAULT; | 
|---|
| 2856 |  | 
|---|
| 2857 | syncobj = drm_syncobj_find(eb->file, user_fence.handle); |
|---|
| 2858 | if (!syncobj) { | 
|---|
| 2859 | drm_dbg(&eb->i915->drm, | 
|---|
| 2860 | "Invalid syncobj handle provided\n"); | 
|---|
| 2861 | return -ENOENT; | 
|---|
| 2862 | } | 
|---|
| 2863 |  | 
|---|
| 2864 | fence = drm_syncobj_fence_get(syncobj); | 
|---|
| 2865 |  | 
|---|
| 2866 | if (!fence && user_fence.flags && | 
|---|
| 2867 | !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { | 
|---|
| 2868 | drm_dbg(&eb->i915->drm, | 
|---|
| 2869 | "Syncobj handle has no fence\n"); | 
|---|
| 2870 | drm_syncobj_put(syncobj); |
|---|
| 2871 | return -EINVAL; | 
|---|
| 2872 | } | 
|---|
| 2873 |  | 
|---|
| 2874 | if (fence) | 
|---|
| 2875 | err = dma_fence_chain_find_seqno(&fence, point); |
|---|
| 2876 |  | 
|---|
| 2877 | if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { | 
|---|
| 2878 | drm_dbg(&eb->i915->drm, | 
|---|
| 2879 | "Syncobj handle missing requested point %llu\n", | 
|---|
| 2880 | point); | 
|---|
| 2881 | dma_fence_put(fence); | 
|---|
| 2882 | drm_syncobj_put(syncobj); |
|---|
| 2883 | return err; | 
|---|
| 2884 | } | 
|---|
| 2885 |  | 
|---|
| 2886 | /* | 
|---|
| 2887 | * A point might have been signaled already and | 
|---|
| 2888 | * garbage collected from the timeline. In this case | 
|---|
| 2889 | * just ignore the point and carry on. | 
|---|
| 2890 | */ | 
|---|
| 2891 | if (!fence && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { | 
|---|
| 2892 | drm_syncobj_put(syncobj); |
|---|
| 2893 | continue; | 
|---|
| 2894 | } | 
|---|
| 2895 |  | 
|---|
| 2896 | /* | 
|---|
| 2897 | * For timeline syncobjs we need to preallocate chains for | 
|---|
| 2898 | * later signaling. | 
|---|
| 2899 | */ | 
|---|
| 2900 | if (point != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) { | 
|---|
| 2901 | /* | 
|---|
| 2902 | * Waiting and signaling the same point (when point != | 
|---|
| 2903 | * 0) would break the timeline. | 
|---|
| 2904 | */ | 
|---|
| 2905 | if (user_fence.flags & I915_EXEC_FENCE_WAIT) { | 
|---|
| 2906 | drm_dbg(&eb->i915->drm, | 
|---|
| 2907 | "Trying to wait & signal the same timeline point.\n"); | 
|---|
| 2908 | dma_fence_put(fence); | 
|---|
| 2909 | drm_syncobj_put(syncobj); |
|---|
| 2910 | return -EINVAL; | 
|---|
| 2911 | } | 
|---|
| 2912 |  | 
|---|
| 2913 | f->chain_fence = dma_fence_chain_alloc(); | 
|---|
| 2914 | if (!f->chain_fence) { | 
|---|
| 2915 | drm_syncobj_put(syncobj); |
|---|
| 2916 | dma_fence_put(fence); | 
|---|
| 2917 | return -ENOMEM; | 
|---|
| 2918 | } | 
|---|
| 2919 | } else { | 
|---|
| 2920 | f->chain_fence = NULL; | 
|---|
| 2921 | } | 
|---|
| 2922 |  | 
|---|
| 2923 | f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2); | 
|---|
| 2924 | f->dma_fence = fence; | 
|---|
| 2925 | f->value = point; | 
|---|
| 2926 | f++; | 
|---|
| 2927 | eb->num_fences++; | 
|---|
| 2928 | } | 
|---|
| 2929 |  | 
|---|
| 2930 | return 0; | 
|---|
| 2931 | } | 
|---|
| 2932 |  | 
|---|
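| | /* |
|---|
| | * Legacy I915_EXEC_FENCE_ARRAY path: when the flag is set, cliprects_ptr/ |
|---|
| | * num_cliprects are reinterpreted as an array of drm_i915_gem_exec_fence |
|---|
| | * (binary syncobjs only, so value stays 0 and no chain is preallocated). |
|---|
| | */ |
|---|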
| 2933 | static int add_fence_array(struct i915_execbuffer *eb) | 
|---|
| 2934 | { | 
|---|
| 2935 | struct drm_i915_gem_execbuffer2 *args = eb->args; | 
|---|
| 2936 | struct drm_i915_gem_exec_fence __user *user; | 
|---|
| 2937 | unsigned long num_fences = args->num_cliprects; | 
|---|
| 2938 | struct eb_fence *f; | 
|---|
| 2939 |  | 
|---|
| 2940 | if (!(args->flags & I915_EXEC_FENCE_ARRAY)) | 
|---|
| 2941 | return 0; | 
|---|
| 2942 |  | 
|---|
| 2943 | if (!num_fences) | 
|---|
| 2944 | return 0; | 
|---|
| 2945 |  | 
|---|
| 2946 | /* Check multiplication overflow for access_ok() and kvmalloc_array() */ | 
|---|
| 2947 | BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); | 
|---|
| 2948 | if (num_fences > min_t(unsigned long, | 
|---|
| 2949 | ULONG_MAX / sizeof(*user), | 
|---|
| 2950 | SIZE_MAX / sizeof(*f) - eb->num_fences)) | 
|---|
| 2951 | return -EINVAL; | 
|---|
| 2952 |  | 
|---|
| 2953 | user = u64_to_user_ptr(args->cliprects_ptr); | 
|---|
| 2954 | if (!access_ok(user, num_fences * sizeof(*user))) | 
|---|
| 2955 | return -EFAULT; | 
|---|
| 2956 |  | 
|---|
| 2957 | f = krealloc(eb->fences, | 
|---|
| 2958 | (eb->num_fences + num_fences) * sizeof(*f), | 
|---|
| 2959 | __GFP_NOWARN | GFP_KERNEL); | 
|---|
| 2960 | if (!f) | 
|---|
| 2961 | return -ENOMEM; | 
|---|
| 2962 |  | 
|---|
| 2963 | eb->fences = f; | 
|---|
| 2964 | f += eb->num_fences; | 
|---|
| 2965 | while (num_fences--) { | 
|---|
| 2966 | struct drm_i915_gem_exec_fence user_fence; | 
|---|
| 2967 | struct drm_syncobj *syncobj; | 
|---|
| 2968 | struct dma_fence *fence = NULL; | 
|---|
| 2969 |  | 
|---|
| 2970 | if (__copy_from_user(&user_fence, user++, sizeof(user_fence))) |
|---|
| 2971 | return -EFAULT; | 
|---|
| 2972 |  | 
|---|
| 2973 | if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) | 
|---|
| 2974 | return -EINVAL; | 
|---|
| 2975 |  | 
|---|
| 2976 | syncobj = drm_syncobj_find(eb->file, user_fence.handle); |
|---|
| 2977 | if (!syncobj) { | 
|---|
| 2978 | drm_dbg(&eb->i915->drm, | 
|---|
| 2979 | "Invalid syncobj handle provided\n"); | 
|---|
| 2980 | return -ENOENT; | 
|---|
| 2981 | } | 
|---|
| 2982 |  | 
|---|
| 2983 | if (user_fence.flags & I915_EXEC_FENCE_WAIT) { | 
|---|
| 2984 | fence = drm_syncobj_fence_get(syncobj); | 
|---|
| 2985 | if (!fence) { | 
|---|
| 2986 | drm_dbg(&eb->i915->drm, | 
|---|
| 2987 | "Syncobj handle has no fence\n"); | 
|---|
| 2988 | drm_syncobj_put(syncobj); |
|---|
| 2989 | return -EINVAL; | 
|---|
| 2990 | } | 
|---|
| 2991 | } | 
|---|
| 2992 |  | 
|---|
| 2993 | BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & | 
|---|
| 2994 | ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); | 
|---|
| 2995 |  | 
|---|
| 2996 | f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2); | 
|---|
| 2997 | f->dma_fence = fence; | 
|---|
| 2998 | f->value = 0; | 
|---|
| 2999 | f->chain_fence = NULL; | 
|---|
| 3000 | f++; | 
|---|
| 3001 | eb->num_fences++; | 
|---|
| 3002 | } | 
|---|
| 3003 |  | 
|---|
| 3004 | return 0; | 
|---|
| 3005 | } | 
|---|
| 3006 |  | 
|---|
| 3007 | static void put_fence_array(struct eb_fence *fences, int num_fences) | 
|---|
| 3008 | { | 
|---|
| 3009 | if (fences) | 
|---|
| 3010 | __free_fence_array(fences, num_fences); |
|---|
| 3011 | } | 
|---|
| 3012 |  | 
|---|
| 3013 | static int | 
|---|
| 3014 | await_fence_array(struct i915_execbuffer *eb, | 
|---|
| 3015 | struct i915_request *rq) | 
|---|
| 3016 | { | 
|---|
| 3017 | unsigned int n; | 
|---|
| 3018 | int err; | 
|---|
| 3019 |  | 
|---|
| 3020 | for (n = 0; n < eb->num_fences; n++) { | 
|---|
| 3021 | if (!eb->fences[n].dma_fence) | 
|---|
| 3022 | continue; | 
|---|
| 3023 |  | 
|---|
| 3024 | err = i915_request_await_dma_fence(rq, eb->fences[n].dma_fence); |
|---|
| 3025 | if (err < 0) | 
|---|
| 3026 | return err; | 
|---|
| 3027 | } | 
|---|
| 3028 |  | 
|---|
| 3029 | return 0; | 
|---|
| 3030 | } | 
|---|
| 3031 |  | 
|---|
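| | /* |
|---|
| | * After submission, attach the request's fence to every syncobj that asked |
|---|
| | * for I915_EXEC_FENCE_SIGNAL: timeline syncobjs get the preallocated |
|---|
| | * dma_fence_chain added at the requested point, binary syncobjs simply |
|---|
| | * have their fence replaced. |
|---|
| | */ |
|---|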
| 3032 | static void signal_fence_array(const struct i915_execbuffer *eb, | 
|---|
| 3033 | struct dma_fence * const fence) | 
|---|
| 3034 | { | 
|---|
| 3035 | unsigned int n; | 
|---|
| 3036 |  | 
|---|
| 3037 | for (n = 0; n < eb->num_fences; n++) { | 
|---|
| 3038 | struct drm_syncobj *syncobj; | 
|---|
| 3039 | unsigned int flags; | 
|---|
| 3040 |  | 
|---|
| 3041 | syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2); | 
|---|
| 3042 | if (!(flags & I915_EXEC_FENCE_SIGNAL)) | 
|---|
| 3043 | continue; | 
|---|
| 3044 |  | 
|---|
| 3045 | if (eb->fences[n].chain_fence) { | 
|---|
| 3046 | drm_syncobj_add_point(syncobj, | 
|---|
| 3047 | eb->fences[n].chain_fence, |
|---|
| 3048 | fence, |
|---|
| 3049 | eb->fences[n].value); |
|---|
| 3050 | /* | 
|---|
| 3051 | * The chain's ownership is transferred to the | 
|---|
| 3052 | * timeline. | 
|---|
| 3053 | */ | 
|---|
| 3054 | eb->fences[n].chain_fence = NULL; | 
|---|
| 3055 | } else { | 
|---|
| 3056 | drm_syncobj_replace_fence(syncobj, fence); | 
|---|
| 3057 | } | 
|---|
| 3058 | } | 
|---|
| 3059 | } | 
|---|
| 3060 |  | 
|---|
| 3061 | static int | 
|---|
| 3062 | parse_timeline_fences(struct i915_user_extension __user *ext, void *data) | 
|---|
| 3063 | { | 
|---|
| 3064 | struct i915_execbuffer *eb = data; | 
|---|
| 3065 | struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences; | 
|---|
| 3066 |  | 
|---|
| 3067 | if (copy_from_user(&timeline_fences, ext, sizeof(timeline_fences))) |
|---|
| 3068 | return -EFAULT; |
|---|
| 3069 |  |
|---|
| 3070 | return add_timeline_fence_array(eb, &timeline_fences); |
|---|
| 3071 | } | 
|---|
| 3072 |  | 
|---|
| 3073 | static void retire_requests(struct intel_timeline *tl, struct i915_request *end) | 
|---|
| 3074 | { | 
|---|
| 3075 | struct i915_request *rq, *rn; | 
|---|
| 3076 |  | 
|---|
| 3077 | list_for_each_entry_safe(rq, rn, &tl->requests, link) | 
|---|
| 3078 | if (rq == end || !i915_request_retire(rq)) | 
|---|
| 3079 | break; | 
|---|
| 3080 | } | 
|---|
| 3081 |  | 
|---|
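| | /* |
|---|
| | * Commit a built request to its timeline and queue it for execution, |
|---|
| | * propagating any earlier error: a failed or closed context is skipped |
|---|
| | * rather than run, and parallel submissions mark the skip/submit fence |
|---|
| | * flags so the composite fence stays consistent across the group. |
|---|
| | */ |
|---|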
| 3082 | static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq, | 
|---|
| 3083 | int err, bool last_parallel) | 
|---|
| 3084 | { | 
|---|
| 3085 | struct intel_timeline * const tl = i915_request_timeline(rq); | 
|---|
| 3086 | struct i915_sched_attr attr = {}; | 
|---|
| 3087 | struct i915_request *prev; | 
|---|
| 3088 |  | 
|---|
| 3089 | lockdep_assert_held(&tl->mutex); | 
|---|
| 3090 | lockdep_unpin_lock(&tl->mutex, rq->cookie); | 
|---|
| 3091 |  | 
|---|
| 3092 | trace_i915_request_add(rq); | 
|---|
| 3093 |  | 
|---|
| 3094 | prev = __i915_request_commit(rq); |
|---|
| 3095 |  | 
|---|
| 3096 | /* Check that the context wasn't destroyed before submission */ | 
|---|
| 3097 | if (likely(!intel_context_is_closed(eb->context))) { | 
|---|
| 3098 | attr = eb->gem_context->sched; | 
|---|
| 3099 | } else { | 
|---|
| 3100 | /* Serialise with context_close via the add_to_timeline */ | 
|---|
| 3101 | i915_request_set_error_once(rq, -ENOENT); |
|---|
| 3102 | __i915_request_skip(rq); | 
|---|
| 3103 | err = -ENOENT; /* override any transient errors */ | 
|---|
| 3104 | } | 
|---|
| 3105 |  | 
|---|
| 3106 | if (intel_context_is_parallel(eb->context)) { |
|---|
| 3107 | if (err) { | 
|---|
| 3108 | __i915_request_skip(rq); | 
|---|
| 3109 | set_bit(I915_FENCE_FLAG_SKIP_PARALLEL, |
|---|
| 3110 | &rq->fence.flags); |
|---|
| 3111 | } |
|---|
| 3112 | if (last_parallel) |
|---|
| 3113 | set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, |
|---|
| 3114 | &rq->fence.flags); |
|---|
| 3115 | } | 
|---|
| 3116 |  | 
|---|
| 3117 | __i915_request_queue(rq, &attr); |
|---|
| 3118 |  | 
|---|
| 3119 | /* Try to clean up the client's timeline after submitting the request */ | 
|---|
| 3120 | if (prev) | 
|---|
| 3121 | retire_requests(tl, prev); |
|---|
| 3122 |  |
|---|
| 3123 | mutex_unlock(&tl->mutex); |
|---|
| 3124 |  | 
|---|
| 3125 | return err; | 
|---|
| 3126 | } | 
|---|
| 3127 |  | 
|---|
| 3128 | static int eb_requests_add(struct i915_execbuffer *eb, int err) | 
|---|
| 3129 | { | 
|---|
| 3130 | int i; | 
|---|
| 3131 |  | 
|---|
| 3132 | /* | 
|---|
| 3133 | * We iterate in reverse order of creation to release timeline mutexes in | 
|---|
| 3134 | * the same order. |
|---|
| 3135 | */ | 
|---|
| 3136 | for_each_batch_add_order(eb, i) { | 
|---|
| 3137 | struct i915_request *rq = eb->requests[i]; | 
|---|
| 3138 |  | 
|---|
| 3139 | if (!rq) | 
|---|
| 3140 | continue; | 
|---|
| 3141 | err |= eb_request_add(eb, rq, err, i == 0); |
|---|
| 3142 | } | 
|---|
| 3143 |  | 
|---|
| 3144 | return err; | 
|---|
| 3145 | } | 
|---|
| 3146 |  | 
|---|
| 3147 | static const i915_user_extension_fn execbuf_extensions[] = { | 
|---|
| 3148 | [DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences, | 
|---|
| 3149 | }; | 
|---|
| 3150 |  | 
|---|
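| | /* |
|---|
| | * With I915_EXEC_USE_EXTENSIONS, cliprects_ptr points at a chain of |
|---|
| | * struct i915_user_extension nodes. Roughly, userspace would do: |
|---|
| | * |
|---|
| | *	ext.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES; |
|---|
| | *	ext.base.next_extension = 0; |
|---|
| | *	execbuf.cliprects_ptr = (uintptr_t)&ext; |
|---|
| | *	execbuf.flags |= I915_EXEC_USE_EXTENSIONS; |
|---|
| | */ |
|---|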
| 3151 | static int | 
|---|
| 3152 | parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args, | 
|---|
| 3153 | struct i915_execbuffer *eb) | 
|---|
| 3154 | { | 
|---|
| 3155 | if (!(args->flags & I915_EXEC_USE_EXTENSIONS)) | 
|---|
| 3156 | return 0; | 
|---|
| 3157 |  | 
|---|
| 3158 | /* The execbuf2 extension mechanism reuses cliprects_ptr. So we cannot | 
|---|
| 3159 | * have another flag also using it at the same time. | 
|---|
| 3160 | */ | 
|---|
| 3161 | if (eb->args->flags & I915_EXEC_FENCE_ARRAY) | 
|---|
| 3162 | return -EINVAL; | 
|---|
| 3163 |  | 
|---|
| 3164 | if (args->num_cliprects != 0) | 
|---|
| 3165 | return -EINVAL; | 
|---|
| 3166 |  | 
|---|
| 3167 | return i915_user_extensions(u64_to_user_ptr(args->cliprects_ptr), | 
|---|
| 3168 | execbuf_extensions, |
|---|
| 3169 | ARRAY_SIZE(execbuf_extensions), |
|---|
| 3170 | eb); |
|---|
| 3171 | } | 
|---|
| 3172 |  | 
|---|
| 3173 | static void eb_requests_get(struct i915_execbuffer *eb) | 
|---|
| 3174 | { | 
|---|
| 3175 | unsigned int i; | 
|---|
| 3176 |  | 
|---|
| 3177 | for_each_batch_create_order(eb, i) { | 
|---|
| 3178 | if (!eb->requests[i]) | 
|---|
| 3179 | break; | 
|---|
| 3180 |  | 
|---|
| 3181 | i915_request_get(eb->requests[i]); |
|---|
| 3182 | } | 
|---|
| 3183 | } | 
|---|
| 3184 |  | 
|---|
| 3185 | static void eb_requests_put(struct i915_execbuffer *eb) | 
|---|
| 3186 | { | 
|---|
| 3187 | unsigned int i; | 
|---|
| 3188 |  | 
|---|
| 3189 | for_each_batch_create_order(eb, i) { | 
|---|
| 3190 | if (!eb->requests[i]) | 
|---|
| 3191 | break; | 
|---|
| 3192 |  | 
|---|
| 3193 | i915_request_put(eb->requests[i]); |
|---|
| 3194 | } | 
|---|
| 3195 | } | 
|---|
| 3196 |  | 
|---|
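| | /* |
|---|
| | * For parallel submission every batch gets its own request; their fences |
|---|
| | * are bundled into one dma_fence_array so that out-fences and syncobjs |
|---|
| | * observe completion of the whole group. |
|---|
| | */ |
|---|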
| 3197 | static struct sync_file * | 
|---|
| 3198 | eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd) | 
|---|
| 3199 | { | 
|---|
| 3200 | struct sync_file *out_fence = NULL; | 
|---|
| 3201 | struct dma_fence_array *fence_array; | 
|---|
| 3202 | struct dma_fence **fences; | 
|---|
| 3203 | unsigned int i; | 
|---|
| 3204 |  | 
|---|
| 3205 | GEM_BUG_ON(!intel_context_is_parent(eb->context)); | 
|---|
| 3206 |  | 
|---|
| 3207 | fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL); | 
|---|
| 3208 | if (!fences) | 
|---|
| 3209 | return ERR_PTR(-ENOMEM); |
|---|
| 3210 |  | 
|---|
| 3211 | for_each_batch_create_order(eb, i) { | 
|---|
| 3212 | fences[i] = &eb->requests[i]->fence; | 
|---|
| 3213 | __set_bit(I915_FENCE_FLAG_COMPOSITE, | 
|---|
| 3214 | &eb->requests[i]->fence.flags); | 
|---|
| 3215 | } | 
|---|
| 3216 |  | 
|---|
| 3217 | fence_array = dma_fence_array_create(eb->num_batches, |
|---|
| 3218 | fences, |
|---|
| 3219 | eb->context->parallel.fence_context, |
|---|
| 3220 | eb->context->parallel.seqno++, |
|---|
| 3221 | false); |
|---|
| 3222 | if (!fence_array) { |
|---|
| 3223 | kfree(fences); |
|---|
| 3224 | return ERR_PTR(-ENOMEM); |
|---|
| 3225 | } | 
|---|
| 3226 |  | 
|---|
| 3227 | /* Move ownership to the dma_fence_array created above */ | 
|---|
| 3228 | for_each_batch_create_order(eb, i) | 
|---|
| 3229 | dma_fence_get(fences[i]); |
|---|
| 3230 |  | 
|---|
| 3231 | if (out_fence_fd != -1) { | 
|---|
| 3232 | out_fence = sync_file_create(&fence_array->base); |
|---|
| 3233 | /* sync_file now owns fence_array, drop creation ref */ |
|---|
| 3234 | dma_fence_put(&fence_array->base); |
|---|
| 3235 | if (!out_fence) |
|---|
| 3236 | return ERR_PTR(-ENOMEM); |
|---|
| 3237 | } | 
|---|
| 3238 |  | 
|---|
| 3239 | eb->composite_fence = &fence_array->base; | 
|---|
| 3240 |  | 
|---|
| 3241 | return out_fence; | 
|---|
| 3242 | } | 
|---|
| 3243 |  | 
|---|
| 3244 | static struct sync_file * | 
|---|
| 3245 | eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq, | 
|---|
| 3246 | struct dma_fence *in_fence, int out_fence_fd) | 
|---|
| 3247 | { | 
|---|
| 3248 | struct sync_file *out_fence = NULL; | 
|---|
| 3249 | int err; | 
|---|
| 3250 |  | 
|---|
| 3251 | if (unlikely(eb->gem_context->syncobj)) { | 
|---|
| 3252 | struct dma_fence *fence; | 
|---|
| 3253 |  | 
|---|
| 3254 | fence = drm_syncobj_fence_get(eb->gem_context->syncobj); |
|---|
| 3255 | err = i915_request_await_dma_fence(rq, fence); | 
|---|
| 3256 | dma_fence_put(fence); | 
|---|
| 3257 | if (err) | 
|---|
| 3258 | return ERR_PTR(err); |
|---|
| 3259 | } | 
|---|
| 3260 |  | 
|---|
| 3261 | if (in_fence) { | 
|---|
| 3262 | if (eb->args->flags & I915_EXEC_FENCE_SUBMIT) | 
|---|
| 3263 | err = i915_request_await_execution(rq, in_fence); |
|---|
| 3264 | else |
|---|
| 3265 | err = i915_request_await_dma_fence(rq, in_fence); |
|---|
| 3266 | if (err < 0) |
|---|
| 3267 | return ERR_PTR(err); |
|---|
| 3268 | } | 
|---|
| 3269 |  | 
|---|
| 3270 | if (eb->fences) { | 
|---|
| 3271 | err = await_fence_array(eb, rq); | 
|---|
| 3272 | if (err) | 
|---|
| 3273 | return ERR_PTR(err); |
|---|
| 3274 | } | 
|---|
| 3275 |  | 
|---|
| 3276 | if (intel_context_is_parallel(eb->context)) { |
|---|
| 3277 | out_fence = eb_composite_fence_create(eb, out_fence_fd); |
|---|
| 3278 | if (IS_ERR(out_fence)) |
|---|
| 3279 | return ERR_PTR(-ENOMEM); |
|---|
| 3280 | } else if (out_fence_fd != -1) { | 
|---|
| 3281 | out_fence = sync_file_create(&rq->fence); |
|---|
| 3282 | if (!out_fence) |
|---|
| 3283 | return ERR_PTR(-ENOMEM); |
|---|
| 3284 | } | 
|---|
| 3285 |  | 
|---|
| 3286 | return out_fence; | 
|---|
| 3287 | } | 
|---|
| 3288 |  | 
|---|
| 3289 | static struct intel_context * | 
|---|
| 3290 | eb_find_context(struct i915_execbuffer *eb, unsigned int context_number) | 
|---|
| 3291 | { | 
|---|
| 3292 | struct intel_context *child; | 
|---|
| 3293 |  | 
|---|
| 3294 | if (likely(context_number == 0)) | 
|---|
| 3295 | return eb->context; | 
|---|
| 3296 |  | 
|---|
| 3297 | for_each_child(eb->context, child) | 
|---|
| 3298 | if (!--context_number) | 
|---|
| 3299 | return child; | 
|---|
| 3300 |  | 
|---|
| 3301 | GEM_BUG_ON( "Context not found"); | 
|---|
| 3302 |  | 
|---|
| 3303 | return NULL; | 
|---|
| 3304 | } | 
|---|
| 3305 |  | 
|---|
| 3306 | static struct sync_file * | 
|---|
| 3307 | eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence, | 
|---|
| 3308 | int out_fence_fd) | 
|---|
| 3309 | { | 
|---|
| 3310 | struct sync_file *out_fence = NULL; | 
|---|
| 3311 | unsigned int i; | 
|---|
| 3312 |  | 
|---|
| 3313 | for_each_batch_create_order(eb, i) { | 
|---|
| 3314 | /* Allocate a request for this batch buffer nice and early. */ | 
|---|
| 3315 | eb->requests[i] = i915_request_create(eb_find_context(eb, i)); |
|---|
| 3316 | if (IS_ERR(eb->requests[i])) { |
|---|
| 3317 | out_fence = ERR_CAST(eb->requests[i]); |
|---|
| 3318 | eb->requests[i] = NULL; | 
|---|
| 3319 | return out_fence; | 
|---|
| 3320 | } | 
|---|
| 3321 |  | 
|---|
| 3322 | /* | 
|---|
| 3323 | * Only the first request added (committed to backend) has to | 
|---|
| 3324 | * take the in fences into account as all subsequent requests | 
|---|
| 3325 | * will have fences inserted in between them. |
|---|
| 3326 | */ | 
|---|
| 3327 | if (i + 1 == eb->num_batches) { | 
|---|
| 3328 | out_fence = eb_fences_add(eb, eb->requests[i], |
|---|
| 3329 | in_fence, out_fence_fd); |
|---|
| 3330 | if (IS_ERR(out_fence)) |
|---|
| 3331 | return out_fence; | 
|---|
| 3332 | } | 
|---|
| 3333 |  | 
|---|
| 3334 | /* | 
|---|
| 3335 | * Not really on stack, but we don't want to call | 
|---|
| 3336 | * kfree on the batch_snapshot when we put it, so use the | 
|---|
| 3337 | * _onstack interface. | 
|---|
| 3338 | */ | 
|---|
| 3339 | if (eb->batches[i]->vma) | 
|---|
| 3340 | eb->requests[i]->batch_res = | 
|---|
| 3341 | i915_vma_resource_get(eb->batches[i]->vma->resource); |
|---|
| 3342 | if (eb->batch_pool) { | 
|---|
| 3343 | GEM_BUG_ON(intel_context_is_parallel(eb->context)); | 
|---|
| 3344 | intel_gt_buffer_pool_mark_active(eb->batch_pool, |
|---|
| 3345 | eb->requests[i]); |
|---|
| 3346 | } | 
|---|
| 3347 | } | 
|---|
| 3348 |  | 
|---|
| 3349 | return out_fence; | 
|---|
| 3350 | } | 
|---|
| 3351 |  | 
|---|
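| | /* |
|---|
| | * Main execbuffer path: validate flags and fences, pick the context and |
|---|
| | * engine, look up and reserve the objects, apply relocations, then build, |
|---|
| | * submit and signal the request(s), unwinding through the err_* labels on |
|---|
| | * failure. |
|---|
| | */ |
|---|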
| 3352 | static int | 
|---|
| 3353 | i915_gem_do_execbuffer(struct drm_device *dev, | 
|---|
| 3354 | struct drm_file *file, | 
|---|
| 3355 | struct drm_i915_gem_execbuffer2 *args, | 
|---|
| 3356 | struct drm_i915_gem_exec_object2 *exec) | 
|---|
| 3357 | { | 
|---|
| 3358 | struct drm_i915_private *i915 = to_i915(dev); | 
|---|
| 3359 | struct i915_execbuffer eb; | 
|---|
| 3360 | struct dma_fence *in_fence = NULL; | 
|---|
| 3361 | struct sync_file *out_fence = NULL; | 
|---|
| 3362 | int out_fence_fd = -1; | 
|---|
| 3363 | int err; | 
|---|
| 3364 |  | 
|---|
| 3365 | BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS); | 
|---|
| 3366 | BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & | 
|---|
| 3367 | ~__EXEC_OBJECT_UNKNOWN_FLAGS); | 
|---|
| 3368 |  | 
|---|
| 3369 | eb.i915 = i915; | 
|---|
| 3370 | eb.file = file; | 
|---|
| 3371 | eb.args = args; | 
|---|
| 3372 | if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) | 
|---|
| 3373 | args->flags |= __EXEC_HAS_RELOC; | 
|---|
| 3374 |  | 
|---|
| 3375 | eb.exec = exec; | 
|---|
| 3376 | eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1); | 
|---|
| 3377 | eb.vma[0].vma = NULL; | 
|---|
| 3378 | eb.batch_pool = NULL; | 
|---|
| 3379 |  | 
|---|
| 3380 | eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; | 
|---|
| 3381 | reloc_cache_init(&eb.reloc_cache, eb.i915); |
|---|
| 3382 |  | 
|---|
| 3383 | eb.buffer_count = args->buffer_count; | 
|---|
| 3384 | eb.batch_start_offset = args->batch_start_offset; | 
|---|
| 3385 | eb.trampoline = NULL; | 
|---|
| 3386 |  | 
|---|
| 3387 | eb.fences = NULL; | 
|---|
| 3388 | eb.num_fences = 0; | 
|---|
| 3389 |  | 
|---|
| 3390 | eb_capture_list_clear(&eb); |
|---|
| 3391 |  |
|---|
| 3392 | memset(eb.requests, 0, sizeof(struct i915_request *) * |
|---|
| 3393 | ARRAY_SIZE(eb.requests)); | 
|---|
| 3394 | eb.composite_fence = NULL; | 
|---|
| 3395 |  | 
|---|
| 3396 | eb.batch_flags = 0; | 
|---|
| 3397 | if (args->flags & I915_EXEC_SECURE) { | 
|---|
| 3398 | if (GRAPHICS_VER(i915) >= 11) | 
|---|
| 3399 | return -ENODEV; | 
|---|
| 3400 |  | 
|---|
| 3401 | /* Return -EPERM to trigger fallback code on old binaries. */ | 
|---|
| 3402 | if (!HAS_SECURE_BATCHES(i915)) | 
|---|
| 3403 | return -EPERM; | 
|---|
| 3404 |  | 
|---|
| 3405 | if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) |
|---|
| 3406 | return -EPERM; | 
|---|
| 3407 |  | 
|---|
| 3408 | eb.batch_flags |= I915_DISPATCH_SECURE; | 
|---|
| 3409 | } | 
|---|
| 3410 | if (args->flags & I915_EXEC_IS_PINNED) | 
|---|
| 3411 | eb.batch_flags |= I915_DISPATCH_PINNED; | 
|---|
| 3412 |  | 
|---|
| 3413 | err = parse_execbuf2_extensions(args, &eb); |
|---|
| 3414 | if (err) | 
|---|
| 3415 | goto err_ext; | 
|---|
| 3416 |  | 
|---|
| 3417 | err = add_fence_array(&eb); |
|---|
| 3418 | if (err) | 
|---|
| 3419 | goto err_ext; | 
|---|
| 3420 |  | 
|---|
| 3421 | #define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT) | 
|---|
| 3422 | if (args->flags & IN_FENCES) { | 
|---|
| 3423 | if ((args->flags & IN_FENCES) == IN_FENCES) | 
|---|
| 3424 | return -EINVAL; | 
|---|
| 3425 |  | 
|---|
| 3426 | in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); | 
|---|
| 3427 | if (!in_fence) { | 
|---|
| 3428 | err = -EINVAL; | 
|---|
| 3429 | goto err_ext; | 
|---|
| 3430 | } | 
|---|
| 3431 | } | 
|---|
| 3432 | #undef IN_FENCES | 
|---|
| 3433 |  | 
|---|
| 3434 | if (args->flags & I915_EXEC_FENCE_OUT) { | 
|---|
| 3435 | out_fence_fd = get_unused_fd_flags(O_CLOEXEC); | 
|---|
| 3436 | if (out_fence_fd < 0) { | 
|---|
| 3437 | err = out_fence_fd; | 
|---|
| 3438 | goto err_in_fence; | 
|---|
| 3439 | } | 
|---|
| 3440 | } | 
|---|
| 3441 |  | 
|---|
| 3442 | err = eb_create(&eb); |
|---|
| 3443 | if (err) | 
|---|
| 3444 | goto err_out_fence; | 
|---|
| 3445 |  | 
|---|
| 3446 | GEM_BUG_ON(!eb.lut_size); | 
|---|
| 3447 |  | 
|---|
| 3448 | err = eb_select_context(&eb); |
|---|
| 3449 | if (unlikely(err)) | 
|---|
| 3450 | goto err_destroy; | 
|---|
| 3451 |  | 
|---|
| 3452 | err = eb_select_engine(&eb); |
|---|
| 3453 | if (unlikely(err)) | 
|---|
| 3454 | goto err_context; | 
|---|
| 3455 |  | 
|---|
| 3456 | err = eb_lookup_vmas(&eb); |
|---|
| 3457 | if (err) { |
|---|
| 3458 | eb_release_vmas(&eb, true); |
|---|
| 3459 | goto err_engine; | 
|---|
| 3460 | } | 
|---|
| 3461 |  | 
|---|
| 3462 | i915_gem_ww_ctx_init(&eb.ww, true); |
|---|
| 3463 |  |
|---|
| 3464 | err = eb_relocate_parse(&eb); |
|---|
| 3465 | if (err) { | 
|---|
| 3466 | /* | 
|---|
| 3467 | * If the user expects the execobject.offset and | 
|---|
| 3468 | * reloc.presumed_offset to be an exact match, | 
|---|
| 3469 | * as for using NO_RELOC, then we cannot update | 
|---|
| 3470 | * the execobject.offset until we have completed | 
|---|
| 3471 | * relocation. | 
|---|
| 3472 | */ | 
|---|
| 3473 | args->flags &= ~__EXEC_HAS_RELOC; | 
|---|
| 3474 | goto err_vma; | 
|---|
| 3475 | } | 
|---|
| 3476 |  | 
|---|
| 3477 | ww_acquire_done(&eb.ww.ctx); |
|---|
| 3478 | err = eb_capture_stage(&eb); |
|---|
| 3479 | if (err) | 
|---|
| 3480 | goto err_vma; | 
|---|
| 3481 |  | 
|---|
| 3482 | out_fence = eb_requests_create(&eb, in_fence, out_fence_fd); |
|---|
| 3483 | if (IS_ERR(out_fence)) { |
|---|
| 3484 | err = PTR_ERR(out_fence); |
|---|
| 3485 | out_fence = NULL; | 
|---|
| 3486 | if (eb.requests[0]) | 
|---|
| 3487 | goto err_request; | 
|---|
| 3488 | else | 
|---|
| 3489 | goto err_vma; | 
|---|
| 3490 | } | 
|---|
| 3491 |  | 
|---|
| 3492 | err = eb_submit(&eb); |
|---|
| 3493 |  | 
|---|
| 3494 | err_request: | 
|---|
| 3495 | eb_requests_get(&eb); |
|---|
| 3496 | err = eb_requests_add(&eb, err); |
|---|
| 3497 |  |
|---|
| 3498 | if (eb.fences) |
|---|
| 3499 | signal_fence_array(&eb, eb.composite_fence ? |
|---|
| 3500 | eb.composite_fence : | 
|---|
| 3501 | &eb.requests[0]->fence); | 
|---|
| 3502 |  | 
|---|
| 3503 | if (unlikely(eb.gem_context->syncobj)) { | 
|---|
| 3504 | drm_syncobj_replace_fence(eb.gem_context->syncobj, |
|---|
| 3505 | eb.composite_fence ? |
|---|
| 3506 | eb.composite_fence : | 
|---|
| 3507 | &eb.requests[0]->fence); | 
|---|
| 3508 | } | 
|---|
| 3509 |  | 
|---|
| 3510 | if (out_fence) { | 
|---|
| 3511 | if (err == 0) { | 
|---|
| 3512 | fd_install(out_fence_fd, out_fence->file); |
|---|
| 3513 | args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ | 
|---|
| 3514 | args->rsvd2 |= (u64)out_fence_fd << 32; | 
|---|
| 3515 | out_fence_fd = -1; | 
|---|
| 3516 | } else { | 
|---|
| 3517 | fput(out_fence->file); | 
|---|
| 3518 | } | 
|---|
| 3519 | } | 
|---|
| 3520 |  | 
|---|
| 3521 | if (!out_fence && eb.composite_fence) | 
|---|
| 3522 | dma_fence_put(eb.composite_fence); |
|---|
| 3523 |  |
|---|
| 3524 | eb_requests_put(&eb); |
|---|
| 3525 |  | 
|---|
| 3526 | err_vma: | 
|---|
| 3527 | eb_release_vmas(&eb, true); |
|---|
| 3528 | WARN_ON(err == -EDEADLK); |
|---|
| 3529 | i915_gem_ww_ctx_fini(&eb.ww); |
|---|
| 3530 |  | 
|---|
| 3531 | if (eb.batch_pool) | 
|---|
| 3532 | intel_gt_buffer_pool_put(eb.batch_pool); |
|---|
| 3533 | err_engine: |
|---|
| 3534 | eb_put_engine(&eb); |
|---|
| 3535 | err_context: | 
|---|
| 3536 | i915_gem_context_put(eb.gem_context); |
|---|
| 3537 | err_destroy: |
|---|
| 3538 | eb_destroy(&eb); |
|---|
| 3539 | err_out_fence: | 
|---|
| 3540 | if (out_fence_fd != -1) | 
|---|
| 3541 | put_unused_fd(out_fence_fd); |
|---|
| 3542 | err_in_fence: |
|---|
| 3543 | dma_fence_put(in_fence); |
|---|
| 3544 | err_ext: |
|---|
| 3545 | put_fence_array(eb.fences, eb.num_fences); |
|---|
| 3546 | return err; | 
|---|
| 3547 | } | 
|---|
| 3548 |  | 
|---|
| 3549 | static size_t eb_element_size(void) | 
|---|
| 3550 | { | 
|---|
| 3551 | return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma); | 
|---|
| 3552 | } | 
|---|
| 3553 |  | 
|---|
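| | /* |
|---|
| | * Each exec object needs room for both the user visible |
|---|
| | * drm_i915_gem_exec_object2 and the driver's eb_vma bookkeeping, so the |
|---|
| | * count limit below is expressed in units of eb_element_size(). |
|---|
| | */ |
|---|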
| 3554 | static bool check_buffer_count(size_t count) | 
|---|
| 3555 | { | 
|---|
| 3556 | const size_t sz = eb_element_size(); | 
|---|
| 3557 |  | 
|---|
| 3558 | /* | 
|---|
| 3559 | * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup | 
|---|
| 3560 | * array size (see eb_create()). Otherwise, we can accept an array as | 
|---|
| 3561 | * large as can be addressed (though use large arrays at your peril)! | 
|---|
| 3562 | */ | 
|---|
| 3563 |  | 
|---|
| 3564 | return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1); | 
|---|
| 3565 | } | 
|---|
| 3566 |  | 
|---|
| 3567 | int | 
|---|
| 3568 | i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, | 
|---|
| 3569 | struct drm_file *file) | 
|---|
| 3570 | { | 
|---|
| 3571 | struct drm_i915_private *i915 = to_i915(dev); | 
|---|
| 3572 | struct drm_i915_gem_execbuffer2 *args = data; | 
|---|
| 3573 | struct drm_i915_gem_exec_object2 *exec2_list; | 
|---|
| 3574 | const size_t count = args->buffer_count; | 
|---|
| 3575 | int err; | 
|---|
| 3576 |  | 
|---|
| 3577 | if (!check_buffer_count(count)) { | 
|---|
| 3578 | drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count); | 
|---|
| 3579 | return -EINVAL; | 
|---|
| 3580 | } | 
|---|
| 3581 |  | 
|---|
| 3582 | err = i915_gem_check_execbuffer(i915, args); |
|---|
| 3583 | if (err) | 
|---|
| 3584 | return err; | 
|---|
| 3585 |  | 
|---|
| 3586 | /* Allocate extra slots for use by the command parser */ | 
|---|
| 3587 | exec2_list = kvmalloc_array(count + 2, eb_element_size(), | 
|---|
| 3588 | __GFP_NOWARN | GFP_KERNEL); | 
|---|
| 3589 | if (exec2_list == NULL) { | 
|---|
| 3590 | drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n", | 
|---|
| 3591 | count); | 
|---|
| 3592 | return -ENOMEM; | 
|---|
| 3593 | } | 
|---|
| 3594 | if (copy_from_user(exec2_list, |
|---|
| 3595 | u64_to_user_ptr(args->buffers_ptr), |
|---|
| 3596 | sizeof(*exec2_list) * count)) { |
|---|
| 3597 | drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count); |
|---|
| 3598 | kvfree(exec2_list); |
|---|
| 3599 | return -EFAULT; | 
|---|
| 3600 | } | 
|---|
| 3601 |  | 
|---|
| 3602 | err = i915_gem_do_execbuffer(dev, file, args, exec2_list); |
|---|
| 3603 |  | 
|---|
| 3604 | /* | 
|---|
| 3605 | * Now that we have begun execution of the batchbuffer, we ignore | 
|---|
| 3606 | * any new error after this point. Also given that we have already | 
|---|
| 3607 | * updated the associated relocations, we try to write out the current | 
|---|
| 3608 | * object locations irrespective of any error. | 
|---|
| 3609 | */ | 
|---|
| 3610 | if (args->flags & __EXEC_HAS_RELOC) { | 
|---|
| 3611 | struct drm_i915_gem_exec_object2 __user *user_exec_list = | 
|---|
| 3612 | u64_to_user_ptr(args->buffers_ptr); | 
|---|
| 3613 | unsigned int i; | 
|---|
| 3614 |  | 
|---|
| 3615 | /* Copy the new buffer offsets back to the user's exec list. */ | 
|---|
| 3616 | /* | 
|---|
| 3617 | * Note: count * sizeof(*user_exec_list) does not overflow, | 
|---|
| 3618 | * because we checked 'count' in check_buffer_count(). | 
|---|
| 3619 | * | 
|---|
| 3620 | * And this range already got effectively checked earlier | 
|---|
| 3621 | * when we did the "copy_from_user()" above. | 
|---|
| 3622 | */ | 
|---|
| 3623 | if (!user_write_access_begin(user_exec_list, | 
|---|
| 3624 | count * sizeof(*user_exec_list))) | 
|---|
| 3625 | goto end; | 
|---|
| 3626 |  | 
|---|
| 3627 | for (i = 0; i < args->buffer_count; i++) { | 
|---|
| 3628 | if (!(exec2_list[i].offset & UPDATE)) | 
|---|
| 3629 | continue; | 
|---|
| 3630 |  | 
|---|
| 3631 | exec2_list[i].offset = | 
|---|
| 3632 | gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK); |
|---|
| 3633 | unsafe_put_user(exec2_list[i].offset, | 
|---|
| 3634 | &user_exec_list[i].offset, | 
|---|
| 3635 | end_user); | 
|---|
| 3636 | } | 
|---|
| 3637 | end_user: | 
|---|
| 3638 | user_write_access_end(); | 
|---|
| 3639 | end:; | 
|---|
| 3640 | } | 
|---|
| 3641 |  | 
|---|
| 3642 | args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS; | 
|---|
| 3643 | kvfree(exec2_list); |
|---|
| 3644 | return err; | 
|---|
| 3645 | } | 
|---|
| 3646 |  | 
|---|