// SPDX-License-Identifier: MIT
/*
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                          refcount   pincount   active
 * S0: initial state                            0          0         0
 * S1: context created                          1          0         0
 * S2: context is currently running             2          1         X
 * S3: GPU referenced, but not current          2          0         1
 * S4: context is current, but destroyed        1          1         0
 * S5: like S3, but destroyed                   1          0         1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */
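
/*
 * For reference, the ioctl flow that walks a context through the states
 * above looks roughly like this from userspace (illustrative sketch only;
 * error handling omitted and "fd" is assumed to be an open i915 DRM fd):
 *
 *	struct drm_i915_gem_context_create_ext create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// create.ctx_id now names the new context (S0->S1)
 *	// execbuf calls using create.ctx_id move it through S2/S3
 *
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *	// the backing state may linger in S4/S5 until it is retired
 */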

#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/nospec.h>

#include <drm/drm_cache.h>
#include <drm/drm_syncobj.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "gt/shmem_utils.h"

#include "pxp/intel_pxp.h"

#include "i915_file_private.h"
#include "i915_gem_context.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
90
static struct kmem_cache *slab_luts;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	kmem_cache_free(slab_luts, lut);
}
102
103static void lut_close(struct i915_gem_context *ctx)
104{
105 struct radix_tree_iter iter;
106 void __rcu **slot;
107
108 mutex_lock(lock: &ctx->lut_mutex);
109 rcu_read_lock();
110 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
111 struct i915_vma *vma = rcu_dereference_raw(*slot);
112 struct drm_i915_gem_object *obj = vma->obj;
113 struct i915_lut_handle *lut;
114
115 if (!kref_get_unless_zero(kref: &obj->base.refcount))
116 continue;
117
118 spin_lock(lock: &obj->lut_lock);
119 list_for_each_entry(lut, &obj->lut_list, obj_link) {
120 if (lut->ctx != ctx)
121 continue;
122
123 if (lut->handle != iter.index)
124 continue;
125
126 list_del(entry: &lut->obj_link);
127 break;
128 }
129 spin_unlock(lock: &obj->lut_lock);
130
131 if (&lut->obj_link != &obj->lut_list) {
132 i915_lut_handle_free(lut);
133 radix_tree_iter_delete(&ctx->handles_vma, iter: &iter, slot);
134 i915_vma_close(vma);
135 i915_gem_object_put(obj);
136 }
137
138 i915_gem_object_put(obj);
139 }
140 rcu_read_unlock();
141 mutex_unlock(lock: &ctx->lut_mutex);
142}
143
144static struct intel_context *
145lookup_user_engine(struct i915_gem_context *ctx,
146 unsigned long flags,
147 const struct i915_engine_class_instance *ci)
148#define LOOKUP_USER_INDEX BIT(0)
149{
150 int idx;
151
152 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
153 return ERR_PTR(error: -EINVAL);
154
155 if (!i915_gem_context_user_engines(ctx)) {
156 struct intel_engine_cs *engine;
157
158 engine = intel_engine_lookup_user(i915: ctx->i915,
159 class: ci->engine_class,
160 instance: ci->engine_instance);
161 if (!engine)
162 return ERR_PTR(error: -EINVAL);
163
164 idx = engine->legacy_idx;
165 } else {
166 idx = ci->engine_instance;
167 }
168
169 return i915_gem_context_get_engine(ctx, idx);
170}
171
172static int validate_priority(struct drm_i915_private *i915,
173 const struct drm_i915_gem_context_param *args)
174{
175 s64 priority = args->value;
176
177 if (args->size)
178 return -EINVAL;
179
180 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
181 return -ENODEV;
182
183 if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
184 priority < I915_CONTEXT_MIN_USER_PRIORITY)
185 return -EINVAL;
186
187 if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
188 !capable(CAP_SYS_NICE))
189 return -EPERM;
190
191 return 0;
192}
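
/*
 * Userspace exercises the check above via SETPARAM; a hedged sketch
 * (assumes "fd" and "ctx_id" already exist; raising the priority above the
 * default additionally requires CAP_SYS_NICE):
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = -512,	// within [MIN, MAX] user priority
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */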
193
194static void proto_context_close(struct drm_i915_private *i915,
195 struct i915_gem_proto_context *pc)
196{
197 int i;
198
199 if (pc->pxp_wakeref)
200 intel_runtime_pm_put(rpm: &i915->runtime_pm, wref: pc->pxp_wakeref);
201 if (pc->vm)
202 i915_vm_put(vm: pc->vm);
203 if (pc->user_engines) {
204 for (i = 0; i < pc->num_user_engines; i++)
205 kfree(objp: pc->user_engines[i].siblings);
206 kfree(objp: pc->user_engines);
207 }
208 kfree(objp: pc);
209}
210
211static int proto_context_set_persistence(struct drm_i915_private *i915,
212 struct i915_gem_proto_context *pc,
213 bool persist)
214{
215 if (persist) {
216 /*
217 * Only contexts that are short-lived [that will expire or be
218 * reset] are allowed to survive past termination. We require
219 * hangcheck to ensure that the persistent requests are healthy.
220 */
221 if (!i915->params.enable_hangcheck)
222 return -EINVAL;
223
224 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
225 } else {
226 /* To cancel a context we use "preempt-to-idle" */
227 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
228 return -ENODEV;
229
230 /*
231 * If the cancel fails, we then need to reset, cleanly!
232 *
233 * If the per-engine reset fails, all hope is lost! We resort
234 * to a full GPU reset in that unlikely case, but realistically
235 * if the engine could not reset, the full reset does not fare
236 * much better. The damage has been done.
237 *
238 * However, if we cannot reset an engine by itself, we cannot
239 * cleanup a hanging persistent context without causing
240 * collateral damage, and we should not pretend we can by
241 * exposing the interface.
242 */
243 if (!intel_has_reset_engine(gt: to_gt(i915)))
244 return -ENODEV;
245
246 pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
247 }
248
249 return 0;
250}
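
/*
 * A minimal userspace sketch of what this maps to (assumed fd/ctx_id;
 * clearing persistence relies on the per-engine reset capability checked
 * above):
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PERSISTENCE,
 *		.value = 0,	// leftover requests are cancelled on context close
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */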
251
252static int proto_context_set_protected(struct drm_i915_private *i915,
253 struct i915_gem_proto_context *pc,
254 bool protected)
255{
256 int ret = 0;
257
258 if (!protected) {
259 pc->uses_protected_content = false;
260 } else if (!intel_pxp_is_enabled(pxp: i915->pxp)) {
261 ret = -ENODEV;
262 } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
263 !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
264 ret = -EPERM;
265 } else {
266 pc->uses_protected_content = true;
267
268 /*
269 * protected context usage requires the PXP session to be up,
270 * which in turn requires the device to be active.
271 */
272 pc->pxp_wakeref = intel_runtime_pm_get(rpm: &i915->runtime_pm);
273
274 if (!intel_pxp_is_active(pxp: i915->pxp))
275 ret = intel_pxp_start(pxp: i915->pxp);
276 }
277
278 return ret;
279}
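
/*
 * Protected content can only be requested while the context is still a
 * proto-context, i.e. through the SETPARAM create extension. A hedged
 * sketch (the context must also end up bannable and not recoverable, as
 * enforced above, and PXP must be enabled):
 *
 *	struct drm_i915_gem_context_create_ext_setparam p_protected = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
 *			.value = 1,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
 *		.base = {
 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
 *			.next_extension = (uintptr_t)&p_protected,
 *		},
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
 *			.value = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p_norecover,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 */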
280
281static struct i915_gem_proto_context *
282proto_context_create(struct drm_i915_file_private *fpriv,
283 struct drm_i915_private *i915, unsigned int flags)
284{
285 struct i915_gem_proto_context *pc, *err;
286
287 pc = kzalloc(sizeof(*pc), GFP_KERNEL);
288 if (!pc)
289 return ERR_PTR(error: -ENOMEM);
290
291 pc->fpriv = fpriv;
292 pc->num_user_engines = -1;
293 pc->user_engines = NULL;
294 pc->user_flags = BIT(UCONTEXT_BANNABLE) |
295 BIT(UCONTEXT_RECOVERABLE);
296 if (i915->params.enable_hangcheck)
297 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
298 pc->sched.priority = I915_PRIORITY_NORMAL;
299
300 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
301 if (!HAS_EXECLISTS(i915)) {
302 err = ERR_PTR(error: -EINVAL);
303 goto proto_close;
304 }
305 pc->single_timeline = true;
306 }
307
308 return pc;
309
310proto_close:
311 proto_context_close(i915, pc);
312 return err;
313}
314
315static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
316 struct i915_gem_proto_context *pc,
317 u32 *id)
318{
319 int ret;
320 void *old;
321
322 lockdep_assert_held(&fpriv->proto_context_lock);
323
324 ret = xa_alloc(xa: &fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
325 if (ret)
326 return ret;
327
328 old = xa_store(&fpriv->proto_context_xa, index: *id, entry: pc, GFP_KERNEL);
329 if (xa_is_err(entry: old)) {
330 xa_erase(&fpriv->context_xa, index: *id);
331 return xa_err(entry: old);
332 }
333 WARN_ON(old);
334
335 return 0;
336}
337
338static int proto_context_register(struct drm_i915_file_private *fpriv,
339 struct i915_gem_proto_context *pc,
340 u32 *id)
341{
342 int ret;
343
344 mutex_lock(lock: &fpriv->proto_context_lock);
345 ret = proto_context_register_locked(fpriv, pc, id);
346 mutex_unlock(lock: &fpriv->proto_context_lock);
347
348 return ret;
349}
350
351static struct i915_address_space *
352i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
353{
354 struct i915_address_space *vm;
355
356 xa_lock(&file_priv->vm_xa);
357 vm = xa_load(&file_priv->vm_xa, index: id);
358 if (vm)
359 kref_get(kref: &vm->ref);
360 xa_unlock(&file_priv->vm_xa);
361
362 return vm;
363}
364
365static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
366 struct i915_gem_proto_context *pc,
367 const struct drm_i915_gem_context_param *args)
368{
369 struct drm_i915_private *i915 = fpriv->i915;
370 struct i915_address_space *vm;
371
372 if (args->size)
373 return -EINVAL;
374
375 if (!HAS_FULL_PPGTT(i915))
376 return -ENODEV;
377
378 if (upper_32_bits(args->value))
379 return -ENOENT;
380
381 vm = i915_gem_vm_lookup(file_priv: fpriv, id: args->value);
382 if (!vm)
383 return -ENOENT;
384
385 if (pc->vm)
386 i915_vm_put(vm: pc->vm);
387 pc->vm = vm;
388
389 return 0;
390}
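
/*
 * Sketch of the matching userspace flow (illustrative; the vm_id comes from
 * the VM_CREATE ioctl implemented later in this file, and the context must
 * not have been finalised by a first use yet, since only the proto-context
 * accepts this parameter):
 *
 *	struct drm_i915_gem_vm_control vm_ctl = {};
 *	struct drm_i915_gem_context_param p;
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm_ctl);
 *
 *	p = (struct drm_i915_gem_context_param) {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_VM,
 *		.value = vm_ctl.vm_id,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */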
391
392struct set_proto_ctx_engines {
393 struct drm_i915_private *i915;
394 unsigned num_engines;
395 struct i915_gem_proto_engine *engines;
396};
397
398static int
399set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
400 void *data)
401{
402 struct i915_context_engines_load_balance __user *ext =
403 container_of_user(base, typeof(*ext), base);
404 const struct set_proto_ctx_engines *set = data;
405 struct drm_i915_private *i915 = set->i915;
406 struct intel_engine_cs **siblings;
407 u16 num_siblings, idx;
408 unsigned int n;
409 int err;
410
411 if (!HAS_EXECLISTS(i915))
412 return -ENODEV;
413
414 if (get_user(idx, &ext->engine_index))
415 return -EFAULT;
416
417 if (idx >= set->num_engines) {
418 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
419 idx, set->num_engines);
420 return -EINVAL;
421 }
422
423 idx = array_index_nospec(idx, set->num_engines);
424 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
425 drm_dbg(&i915->drm,
426 "Invalid placement[%d], already occupied\n", idx);
427 return -EEXIST;
428 }
429
430 if (get_user(num_siblings, &ext->num_siblings))
431 return -EFAULT;
432
433 err = check_user_mbz(&ext->flags);
434 if (err)
435 return err;
436
437 err = check_user_mbz(&ext->mbz64);
438 if (err)
439 return err;
440
441 if (num_siblings == 0)
442 return 0;
443
444 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
445 if (!siblings)
446 return -ENOMEM;
447
448 for (n = 0; n < num_siblings; n++) {
449 struct i915_engine_class_instance ci;
450
451 if (copy_from_user(to: &ci, from: &ext->engines[n], n: sizeof(ci))) {
452 err = -EFAULT;
453 goto err_siblings;
454 }
455
456 siblings[n] = intel_engine_lookup_user(i915,
457 class: ci.engine_class,
458 instance: ci.engine_instance);
459 if (!siblings[n]) {
460 drm_dbg(&i915->drm,
461 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
462 n, ci.engine_class, ci.engine_instance);
463 err = -EINVAL;
464 goto err_siblings;
465 }
466 }
467
468 if (num_siblings == 1) {
469 set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
470 set->engines[idx].engine = siblings[0];
471 kfree(objp: siblings);
472 } else {
473 set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
474 set->engines[idx].num_siblings = num_siblings;
475 set->engines[idx].siblings = siblings;
476 }
477
478 return 0;
479
480err_siblings:
481 kfree(objp: siblings);
482
483 return err;
484}
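
/*
 * Userspace describes one balanced slot roughly like this (sketch using the
 * uAPI helper macro; the extension is then chained off the engine map
 * handled by set_proto_ctx_engines() below):
 *
 *	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
 *		.base = { .name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE },
 *		.engine_index = 0,	// which slot of the engine map to fill
 *		.num_siblings = 2,
 *		.engines = {
 *			{ I915_ENGINE_CLASS_VIDEO, 0 },
 *			{ I915_ENGINE_CLASS_VIDEO, 1 },
 *		},
 *	};
 */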
485
486static int
487set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
488{
489 struct i915_context_engines_bond __user *ext =
490 container_of_user(base, typeof(*ext), base);
491 const struct set_proto_ctx_engines *set = data;
492 struct drm_i915_private *i915 = set->i915;
493 struct i915_engine_class_instance ci;
494 struct intel_engine_cs *master;
495 u16 idx, num_bonds;
496 int err, n;
497
498 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
499 !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
500 drm_dbg(&i915->drm,
501 "Bonding not supported on this platform\n");
502 return -ENODEV;
503 }
504
505 if (get_user(idx, &ext->virtual_index))
506 return -EFAULT;
507
508 if (idx >= set->num_engines) {
509 drm_dbg(&i915->drm,
510 "Invalid index for virtual engine: %d >= %d\n",
511 idx, set->num_engines);
512 return -EINVAL;
513 }
514
515 idx = array_index_nospec(idx, set->num_engines);
516 if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
517 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
518 return -EINVAL;
519 }
520
521 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
522 drm_dbg(&i915->drm,
523 "Bonding with virtual engines not allowed\n");
524 return -EINVAL;
525 }
526
527 err = check_user_mbz(&ext->flags);
528 if (err)
529 return err;
530
531 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
532 err = check_user_mbz(&ext->mbz64[n]);
533 if (err)
534 return err;
535 }
536
537 if (copy_from_user(to: &ci, from: &ext->master, n: sizeof(ci)))
538 return -EFAULT;
539
540 master = intel_engine_lookup_user(i915,
541 class: ci.engine_class,
542 instance: ci.engine_instance);
543 if (!master) {
544 drm_dbg(&i915->drm,
545 "Unrecognised master engine: { class:%u, instance:%u }\n",
546 ci.engine_class, ci.engine_instance);
547 return -EINVAL;
548 }
549
550 if (intel_engine_uses_guc(engine: master)) {
551 drm_dbg(&i915->drm, "bonding extension not supported with GuC submission");
552 return -ENODEV;
553 }
554
555 if (get_user(num_bonds, &ext->num_bonds))
556 return -EFAULT;
557
558 for (n = 0; n < num_bonds; n++) {
559 struct intel_engine_cs *bond;
560
561 if (copy_from_user(to: &ci, from: &ext->engines[n], n: sizeof(ci)))
562 return -EFAULT;
563
564 bond = intel_engine_lookup_user(i915,
565 class: ci.engine_class,
566 instance: ci.engine_instance);
567 if (!bond) {
568 drm_dbg(&i915->drm,
569 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
570 n, ci.engine_class, ci.engine_instance);
571 return -EINVAL;
572 }
573 }
574
575 return 0;
576}
577
578static int
579set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
580 void *data)
581{
582 struct i915_context_engines_parallel_submit __user *ext =
583 container_of_user(base, typeof(*ext), base);
584 const struct set_proto_ctx_engines *set = data;
585 struct drm_i915_private *i915 = set->i915;
586 struct i915_engine_class_instance prev_engine;
587 u64 flags;
588 int err = 0, n, i, j;
589 u16 slot, width, num_siblings;
590 struct intel_engine_cs **siblings = NULL;
591 intel_engine_mask_t prev_mask;
592
593 if (get_user(slot, &ext->engine_index))
594 return -EFAULT;
595
596 if (get_user(width, &ext->width))
597 return -EFAULT;
598
599 if (get_user(num_siblings, &ext->num_siblings))
600 return -EFAULT;
601
602 if (!intel_uc_uses_guc_submission(uc: &to_gt(i915)->uc) &&
603 num_siblings != 1) {
604 drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
605 num_siblings);
606 return -EINVAL;
607 }
608
609 if (slot >= set->num_engines) {
610 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
611 slot, set->num_engines);
612 return -EINVAL;
613 }
614
615 if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
616 drm_dbg(&i915->drm,
617 "Invalid placement[%d], already occupied\n", slot);
618 return -EINVAL;
619 }
620
621 if (get_user(flags, &ext->flags))
622 return -EFAULT;
623
624 if (flags) {
625 drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
626 return -EINVAL;
627 }
628
629 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
630 err = check_user_mbz(&ext->mbz64[n]);
631 if (err)
632 return err;
633 }
634
635 if (width < 2) {
636 drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
637 return -EINVAL;
638 }
639
640 if (num_siblings < 1) {
641 drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
642 num_siblings);
643 return -EINVAL;
644 }
645
646 siblings = kmalloc_array(num_siblings * width,
647 sizeof(*siblings),
648 GFP_KERNEL);
649 if (!siblings)
650 return -ENOMEM;
651
652 /* Create contexts / engines */
653 for (i = 0; i < width; ++i) {
654 intel_engine_mask_t current_mask = 0;
655
656 for (j = 0; j < num_siblings; ++j) {
657 struct i915_engine_class_instance ci;
658
659 n = i * num_siblings + j;
660 if (copy_from_user(to: &ci, from: &ext->engines[n], n: sizeof(ci))) {
661 err = -EFAULT;
662 goto out_err;
663 }
664
665 siblings[n] =
666 intel_engine_lookup_user(i915, class: ci.engine_class,
667 instance: ci.engine_instance);
668 if (!siblings[n]) {
669 drm_dbg(&i915->drm,
670 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
671 n, ci.engine_class, ci.engine_instance);
672 err = -EINVAL;
673 goto out_err;
674 }
675
676 /*
677 * We don't support breadcrumb handshake on these
678 * classes
679 */
680 if (siblings[n]->class == RENDER_CLASS ||
681 siblings[n]->class == COMPUTE_CLASS) {
682 err = -EINVAL;
683 goto out_err;
684 }
685
686 if (n) {
687 if (prev_engine.engine_class !=
688 ci.engine_class) {
689 drm_dbg(&i915->drm,
690 "Mismatched class %d, %d\n",
691 prev_engine.engine_class,
692 ci.engine_class);
693 err = -EINVAL;
694 goto out_err;
695 }
696 }
697
698 prev_engine = ci;
699 current_mask |= siblings[n]->logical_mask;
700 }
701
702 if (i > 0) {
703 if (current_mask != prev_mask << 1) {
704 drm_dbg(&i915->drm,
705 "Non contiguous logical mask 0x%x, 0x%x\n",
706 prev_mask, current_mask);
707 err = -EINVAL;
708 goto out_err;
709 }
710 }
711 prev_mask = current_mask;
712 }
713
714 set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
715 set->engines[slot].num_siblings = num_siblings;
716 set->engines[slot].width = width;
717 set->engines[slot].siblings = siblings;
718
719 return 0;
720
721out_err:
722 kfree(objp: siblings);
723
724 return err;
725}
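
/*
 * Illustrative shape of the extension parsed above, for a two-wide parallel
 * context on the video engines (GuC submission is assumed whenever
 * num_siblings would be greater than one; the engines listed per slot must
 * have contiguous logical masks, as checked in the loop):
 *
 *	I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel, 2) = {
 *		.base = { .name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT },
 *		.engine_index = 0,
 *		.width = 2,		// two requests submitted in parallel
 *		.num_siblings = 1,	// one placement choice per request
 *		.engines = {
 *			{ I915_ENGINE_CLASS_VIDEO, 0 },
 *			{ I915_ENGINE_CLASS_VIDEO, 1 },
 *		},
 *	};
 */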
726
727static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
728 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
729 [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
730 [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
731 set_proto_ctx_engines_parallel_submit,
732};
733
734static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
735 struct i915_gem_proto_context *pc,
736 const struct drm_i915_gem_context_param *args)
737{
738 struct drm_i915_private *i915 = fpriv->i915;
739 struct set_proto_ctx_engines set = { .i915 = i915 };
740 struct i915_context_param_engines __user *user =
741 u64_to_user_ptr(args->value);
742 unsigned int n;
743 u64 extensions;
744 int err;
745
746 if (pc->num_user_engines >= 0) {
747 drm_dbg(&i915->drm, "Cannot set engines twice");
748 return -EINVAL;
749 }
750
751 if (args->size < sizeof(*user) ||
752 !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
753 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
754 args->size);
755 return -EINVAL;
756 }
757
758 set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
759 /* RING_MASK has no shift so we can use it directly here */
760 if (set.num_engines > I915_EXEC_RING_MASK + 1)
761 return -EINVAL;
762
763 set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
764 if (!set.engines)
765 return -ENOMEM;
766
767 for (n = 0; n < set.num_engines; n++) {
768 struct i915_engine_class_instance ci;
769 struct intel_engine_cs *engine;
770
771 if (copy_from_user(to: &ci, from: &user->engines[n], n: sizeof(ci))) {
772 kfree(objp: set.engines);
773 return -EFAULT;
774 }
775
776 memset(s: &set.engines[n], c: 0, n: sizeof(set.engines[n]));
777
778 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
779 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
780 continue;
781
782 engine = intel_engine_lookup_user(i915,
783 class: ci.engine_class,
784 instance: ci.engine_instance);
785 if (!engine) {
786 drm_dbg(&i915->drm,
787 "Invalid engine[%d]: { class:%d, instance:%d }\n",
788 n, ci.engine_class, ci.engine_instance);
789 kfree(objp: set.engines);
790 return -ENOENT;
791 }
792
793 set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
794 set.engines[n].engine = engine;
795 }
796
797 err = -EFAULT;
798 if (!get_user(extensions, &user->extensions))
799 err = i915_user_extensions(u64_to_user_ptr(extensions),
800 tbl: set_proto_ctx_engines_extensions,
801 ARRAY_SIZE(set_proto_ctx_engines_extensions),
802 data: &set);
803 if (err) {
804 kfree(objp: set.engines);
805 return err;
806 }
807
808 pc->num_user_engines = set.num_engines;
809 pc->user_engines = set.engines;
810
811 return 0;
812}
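
/*
 * Sketch of the uAPI payload parsed above (hedged; assumes fd/ctx_id and
 * that the context has not yet been finalised). Slot indices in this map
 * are what execbuf later uses to pick an engine:
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.engines = {
 *			{ I915_ENGINE_CLASS_RENDER, 0 },	// slot 0
 *			{ I915_ENGINE_CLASS_COPY, 0 },		// slot 1
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (uintptr_t)&engines,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */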
813
814static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
815 struct i915_gem_proto_context *pc,
816 struct drm_i915_gem_context_param *args)
817{
818 struct drm_i915_private *i915 = fpriv->i915;
819 struct drm_i915_gem_context_param_sseu user_sseu;
820 struct intel_sseu *sseu;
821 int ret;
822
823 if (args->size < sizeof(user_sseu))
824 return -EINVAL;
825
826 if (GRAPHICS_VER(i915) != 11)
827 return -ENODEV;
828
829 if (copy_from_user(to: &user_sseu, u64_to_user_ptr(args->value),
830 n: sizeof(user_sseu)))
831 return -EFAULT;
832
833 if (user_sseu.rsvd)
834 return -EINVAL;
835
836 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
837 return -EINVAL;
838
839 if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
840 return -EINVAL;
841
842 if (pc->num_user_engines >= 0) {
843 int idx = user_sseu.engine.engine_instance;
844 struct i915_gem_proto_engine *pe;
845
846 if (idx >= pc->num_user_engines)
847 return -EINVAL;
848
849 idx = array_index_nospec(idx, pc->num_user_engines);
850 pe = &pc->user_engines[idx];
851
852 /* Only render engine supports RPCS configuration. */
853 if (pe->engine->class != RENDER_CLASS)
854 return -EINVAL;
855
856 sseu = &pe->sseu;
857 } else {
858 /* Only render engine supports RPCS configuration. */
859 if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
860 return -EINVAL;
861
862 /* There is only one render engine */
863 if (user_sseu.engine.engine_instance != 0)
864 return -EINVAL;
865
866 sseu = &pc->legacy_rcs_sseu;
867 }
868
869 ret = i915_gem_user_to_context_sseu(gt: to_gt(i915), user: &user_sseu, context: sseu);
870 if (ret)
871 return ret;
872
873 args->size = sizeof(user_sseu);
874
875 return 0;
876}
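
/*
 * Example of the payload handled above (hedged sketch for a Gen11 render
 * engine, the only case accepted here; the masks and EU counts are
 * illustrative and are validated by i915_gem_user_to_context_sseu()):
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0xf,		// half of an 8-subslice slice
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */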
877
878static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
879 struct i915_gem_proto_context *pc,
880 struct drm_i915_gem_context_param *args)
881{
882 struct drm_i915_private *i915 = fpriv->i915;
883 int ret = 0;
884
885 switch (args->param) {
886 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
887 if (args->size)
888 ret = -EINVAL;
889 else if (args->value)
890 pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
891 else
892 pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
893 break;
894
895 case I915_CONTEXT_PARAM_BANNABLE:
896 if (args->size)
897 ret = -EINVAL;
898 else if (!capable(CAP_SYS_ADMIN) && !args->value)
899 ret = -EPERM;
900 else if (args->value)
901 pc->user_flags |= BIT(UCONTEXT_BANNABLE);
902 else if (pc->uses_protected_content)
903 ret = -EPERM;
904 else
905 pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
906 break;
907
908 case I915_CONTEXT_PARAM_LOW_LATENCY:
909 if (intel_uc_uses_guc_submission(uc: &to_gt(i915)->uc))
910 pc->user_flags |= BIT(UCONTEXT_LOW_LATENCY);
911 else
912 ret = -EINVAL;
913 break;
914
915 case I915_CONTEXT_PARAM_RECOVERABLE:
916 if (args->size)
917 ret = -EINVAL;
918 else if (!args->value)
919 pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
920 else if (pc->uses_protected_content)
921 ret = -EPERM;
922 else
923 pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
924 break;
925
926 case I915_CONTEXT_PARAM_PRIORITY:
927 ret = validate_priority(i915: fpriv->i915, args);
928 if (!ret)
929 pc->sched.priority = args->value;
930 break;
931
932 case I915_CONTEXT_PARAM_SSEU:
933 ret = set_proto_ctx_sseu(fpriv, pc, args);
934 break;
935
936 case I915_CONTEXT_PARAM_VM:
937 ret = set_proto_ctx_vm(fpriv, pc, args);
938 break;
939
940 case I915_CONTEXT_PARAM_ENGINES:
941 ret = set_proto_ctx_engines(fpriv, pc, args);
942 break;
943
944 case I915_CONTEXT_PARAM_PERSISTENCE:
945 if (args->size)
946 ret = -EINVAL;
947 else
948 ret = proto_context_set_persistence(i915: fpriv->i915, pc,
949 persist: args->value);
950 break;
951
952 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
953 ret = proto_context_set_protected(i915: fpriv->i915, pc,
954 protected: args->value);
955 break;
956
957 case I915_CONTEXT_PARAM_NO_ZEROMAP:
958 case I915_CONTEXT_PARAM_BAN_PERIOD:
959 case I915_CONTEXT_PARAM_RINGSIZE:
960 case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
961 default:
962 ret = -EINVAL;
963 break;
964 }
965
966 return ret;
967}
968
969static int intel_context_set_gem(struct intel_context *ce,
970 struct i915_gem_context *ctx,
971 struct intel_sseu sseu)
972{
973 int ret = 0;
974
975 GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
976 RCU_INIT_POINTER(ce->gem_context, ctx);
977
978 GEM_BUG_ON(intel_context_is_pinned(ce));
979
980 if (ce->engine->class == COMPUTE_CLASS)
981 ce->ring_size = SZ_512K;
982 else
983 ce->ring_size = SZ_16K;
984
985 i915_vm_put(vm: ce->vm);
986 ce->vm = i915_gem_context_get_eb_vm(ctx);
987
988 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
989 intel_engine_has_timeslices(engine: ce->engine) &&
990 intel_engine_has_semaphores(engine: ce->engine))
991 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
992
993 if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
994 ctx->i915->params.request_timeout_ms) {
995 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
996
997 intel_context_set_watchdog_us(ce, timeout_us: (u64)timeout_ms * 1000);
998 }
999
1000 /* A valid SSEU has no zero fields */
1001 if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
1002 ret = intel_context_reconfigure_sseu(ce, sseu);
1003
1004 if (test_bit(UCONTEXT_LOW_LATENCY, &ctx->user_flags))
1005 __set_bit(CONTEXT_LOW_LATENCY, &ce->flags);
1006
1007 return ret;
1008}
1009
1010static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
1011{
1012 while (count--) {
1013 struct intel_context *ce = e->engines[count], *child;
1014
1015 if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
1016 continue;
1017
1018 for_each_child(ce, child)
1019 intel_context_unpin(ce: child);
1020 intel_context_unpin(ce);
1021 }
1022}
1023
1024static void unpin_engines(struct i915_gem_engines *e)
1025{
1026 __unpin_engines(e, count: e->num_engines);
1027}
1028
1029static void __free_engines(struct i915_gem_engines *e, unsigned int count)
1030{
1031 while (count--) {
1032 if (!e->engines[count])
1033 continue;
1034
1035 intel_context_put(ce: e->engines[count]);
1036 }
1037 kfree(objp: e);
1038}
1039
1040static void free_engines(struct i915_gem_engines *e)
1041{
1042 __free_engines(e, count: e->num_engines);
1043}
1044
1045static void free_engines_rcu(struct rcu_head *rcu)
1046{
1047 struct i915_gem_engines *engines =
1048 container_of(rcu, struct i915_gem_engines, rcu);
1049
1050 i915_sw_fence_fini(fence: &engines->fence);
1051 free_engines(e: engines);
1052}
1053
1054static void accumulate_runtime(struct i915_drm_client *client,
1055 struct i915_gem_engines *engines)
1056{
1057 struct i915_gem_engines_iter it;
1058 struct intel_context *ce;
1059
1060 if (!client)
1061 return;
1062
1063 /* Transfer accumulated runtime to the parent GEM context. */
1064 for_each_gem_engine(ce, engines, it) {
1065 unsigned int class = ce->engine->uabi_class;
1066
1067 GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
1068 atomic64_add(i: intel_context_get_total_runtime_ns(ce),
1069 v: &client->past_runtime[class]);
1070 }
1071}
1072
1073static int
1074engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
1075{
1076 struct i915_gem_engines *engines =
1077 container_of(fence, typeof(*engines), fence);
1078 struct i915_gem_context *ctx = engines->ctx;
1079
1080 switch (state) {
1081 case FENCE_COMPLETE:
1082 if (!list_empty(head: &engines->link)) {
1083 unsigned long flags;
1084
1085 spin_lock_irqsave(&ctx->stale.lock, flags);
1086 list_del(entry: &engines->link);
1087 spin_unlock_irqrestore(lock: &ctx->stale.lock, flags);
1088 }
1089 accumulate_runtime(client: ctx->client, engines);
1090 i915_gem_context_put(ctx);
1091
1092 break;
1093
1094 case FENCE_FREE:
1095 init_rcu_head(head: &engines->rcu);
1096 call_rcu(head: &engines->rcu, func: free_engines_rcu);
1097 break;
1098 }
1099
1100 return NOTIFY_DONE;
1101}
1102
1103static struct i915_gem_engines *alloc_engines(unsigned int count)
1104{
1105 struct i915_gem_engines *e;
1106
1107 e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
1108 if (!e)
1109 return NULL;
1110
1111 i915_sw_fence_init(&e->fence, engines_notify);
1112 return e;
1113}
1114
1115static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
1116 struct intel_sseu rcs_sseu)
1117{
1118 const unsigned int max = I915_NUM_ENGINES;
1119 struct intel_engine_cs *engine;
1120 struct i915_gem_engines *e, *err;
1121
1122 e = alloc_engines(count: max);
1123 if (!e)
1124 return ERR_PTR(error: -ENOMEM);
1125
1126 for_each_uabi_engine(engine, ctx->i915) {
1127 struct intel_context *ce;
1128 struct intel_sseu sseu = {};
1129 int ret;
1130
1131 if (engine->legacy_idx == INVALID_ENGINE)
1132 continue;
1133
1134 GEM_BUG_ON(engine->legacy_idx >= max);
1135 GEM_BUG_ON(e->engines[engine->legacy_idx]);
1136
1137 ce = intel_context_create(engine);
1138 if (IS_ERR(ptr: ce)) {
1139 err = ERR_CAST(ptr: ce);
1140 goto free_engines;
1141 }
1142
1143 e->engines[engine->legacy_idx] = ce;
1144 e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
1145
1146 if (engine->class == RENDER_CLASS)
1147 sseu = rcs_sseu;
1148
1149 ret = intel_context_set_gem(ce, ctx, sseu);
1150 if (ret) {
1151 err = ERR_PTR(error: ret);
1152 goto free_engines;
1153 }
1154
1155 }
1156
1157 return e;
1158
1159free_engines:
1160 free_engines(e);
1161 return err;
1162}
1163
1164static int perma_pin_contexts(struct intel_context *ce)
1165{
1166 struct intel_context *child;
1167 int i = 0, j = 0, ret;
1168
1169 GEM_BUG_ON(!intel_context_is_parent(ce));
1170
1171 ret = intel_context_pin(ce);
1172 if (unlikely(ret))
1173 return ret;
1174
1175 for_each_child(ce, child) {
1176 ret = intel_context_pin(ce: child);
1177 if (unlikely(ret))
1178 goto unwind;
1179 ++i;
1180 }
1181
1182 set_bit(CONTEXT_PERMA_PIN, addr: &ce->flags);
1183
1184 return 0;
1185
1186unwind:
1187 intel_context_unpin(ce);
1188 for_each_child(ce, child) {
1189 if (j++ < i)
1190 intel_context_unpin(ce: child);
1191 else
1192 break;
1193 }
1194
1195 return ret;
1196}
1197
1198static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
1199 unsigned int num_engines,
1200 struct i915_gem_proto_engine *pe)
1201{
1202 struct i915_gem_engines *e, *err;
1203 unsigned int n;
1204
1205 e = alloc_engines(count: num_engines);
1206 if (!e)
1207 return ERR_PTR(error: -ENOMEM);
1208 e->num_engines = num_engines;
1209
1210 for (n = 0; n < num_engines; n++) {
1211 struct intel_context *ce, *child;
1212 int ret;
1213
1214 switch (pe[n].type) {
1215 case I915_GEM_ENGINE_TYPE_PHYSICAL:
1216 ce = intel_context_create(engine: pe[n].engine);
1217 break;
1218
1219 case I915_GEM_ENGINE_TYPE_BALANCED:
1220 ce = intel_engine_create_virtual(siblings: pe[n].siblings,
1221 count: pe[n].num_siblings, flags: 0);
1222 break;
1223
1224 case I915_GEM_ENGINE_TYPE_PARALLEL:
1225 ce = intel_engine_create_parallel(engines: pe[n].siblings,
1226 num_engines: pe[n].num_siblings,
1227 width: pe[n].width);
1228 break;
1229
1230 case I915_GEM_ENGINE_TYPE_INVALID:
1231 default:
1232 GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
1233 continue;
1234 }
1235
1236 if (IS_ERR(ptr: ce)) {
1237 err = ERR_CAST(ptr: ce);
1238 goto free_engines;
1239 }
1240
1241 e->engines[n] = ce;
1242
1243 ret = intel_context_set_gem(ce, ctx, sseu: pe->sseu);
1244 if (ret) {
1245 err = ERR_PTR(error: ret);
1246 goto free_engines;
1247 }
1248 for_each_child(ce, child) {
1249 ret = intel_context_set_gem(ce: child, ctx, sseu: pe->sseu);
1250 if (ret) {
1251 err = ERR_PTR(error: ret);
1252 goto free_engines;
1253 }
1254 }
1255
1256 /*
1257 * XXX: Must be done after calling intel_context_set_gem as that
1258 * function changes the ring size. The ring is allocated when
1259 * the context is pinned. If the ring size is changed after
		 * allocation we have a mismatch of the ring size, which will cause
1261 * the context to hang. Presumably with a bit of reordering we
1262 * could move the perma-pin step to the backend function
1263 * intel_engine_create_parallel.
1264 */
1265 if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
1266 ret = perma_pin_contexts(ce);
1267 if (ret) {
1268 err = ERR_PTR(error: ret);
1269 goto free_engines;
1270 }
1271 }
1272 }
1273
1274 return e;
1275
1276free_engines:
1277 free_engines(e);
1278 return err;
1279}
1280
1281static void i915_gem_context_release_work(struct work_struct *work)
1282{
1283 struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
1284 release_work);
1285 struct i915_address_space *vm;
1286
1287 trace_i915_context_free(ctx);
1288 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1289
1290 spin_lock(lock: &ctx->i915->gem.contexts.lock);
1291 list_del(entry: &ctx->link);
1292 spin_unlock(lock: &ctx->i915->gem.contexts.lock);
1293
1294 if (ctx->syncobj)
1295 drm_syncobj_put(obj: ctx->syncobj);
1296
1297 vm = ctx->vm;
1298 if (vm)
1299 i915_vm_put(vm);
1300
1301 if (ctx->pxp_wakeref)
1302 intel_runtime_pm_put(rpm: &ctx->i915->runtime_pm, wref: ctx->pxp_wakeref);
1303
1304 if (ctx->client)
1305 i915_drm_client_put(client: ctx->client);
1306
1307 mutex_destroy(lock: &ctx->engines_mutex);
1308 mutex_destroy(lock: &ctx->lut_mutex);
1309
1310 put_pid(pid: ctx->pid);
1311 mutex_destroy(lock: &ctx->mutex);
1312
1313 kfree_rcu(ctx, rcu);
1314}
1315
1316void i915_gem_context_release(struct kref *ref)
1317{
1318 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
1319
1320 queue_work(wq: ctx->i915->wq, work: &ctx->release_work);
1321}
1322
1323static inline struct i915_gem_engines *
1324__context_engines_static(const struct i915_gem_context *ctx)
1325{
1326 return rcu_dereference_protected(ctx->engines, true);
1327}
1328
1329static void __reset_context(struct i915_gem_context *ctx,
1330 struct intel_engine_cs *engine)
1331{
1332 intel_gt_handle_error(gt: engine->gt, engine_mask: engine->mask, flags: 0,
1333 fmt: "context closure in %s", ctx->name);
1334}
1335
1336static bool __cancel_engine(struct intel_engine_cs *engine)
1337{
1338 /*
1339 * Send a "high priority pulse" down the engine to cause the
1340 * current request to be momentarily preempted. (If it fails to
1341 * be preempted, it will be reset). As we have marked our context
1342 * as banned, any incomplete request, including any running, will
1343 * be skipped following the preemption.
1344 *
1345 * If there is no hangchecking (one of the reasons why we try to
1346 * cancel the context) and no forced preemption, there may be no
1347 * means by which we reset the GPU and evict the persistent hog.
1348 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fall back to doing a local reset
1350 * instead.
1351 */
1352 return intel_engine_pulse(engine) == 0;
1353}
1354
1355static struct intel_engine_cs *active_engine(struct intel_context *ce)
1356{
1357 struct intel_engine_cs *engine = NULL;
1358 struct i915_request *rq;
1359
1360 if (intel_context_has_inflight(ce))
1361 return intel_context_inflight(ce);
1362
1363 if (!ce->timeline)
1364 return NULL;
1365
1366 /*
1367 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
1368 * to the request to prevent it being transferred to a new timeline
1369 * (and onto a new timeline->requests list).
1370 */
1371 rcu_read_lock();
1372 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1373 bool found;
1374
		/* timeline is already completed up to this point? */
1376 if (!i915_request_get_rcu(rq))
1377 break;
1378
1379 /* Check with the backend if the request is inflight */
1380 found = true;
1381 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
1382 found = i915_request_active_engine(rq, active: &engine);
1383
1384 i915_request_put(rq);
1385 if (found)
1386 break;
1387 }
1388 rcu_read_unlock();
1389
1390 return engine;
1391}
1392
1393static void
1394kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
1395{
1396 struct i915_gem_engines_iter it;
1397 struct intel_context *ce;
1398
	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
1406 for_each_gem_engine(ce, engines, it) {
1407 struct intel_engine_cs *engine;
1408
1409 if ((exit || !persistent) && intel_context_revoke(ce))
1410 continue; /* Already marked. */
1411
1412 /*
1413 * Check the current active state of this context; if we
1414 * are currently executing on the GPU we need to evict
1415 * ourselves. On the other hand, if we haven't yet been
1416 * submitted to the GPU or if everything is complete,
1417 * we have nothing to do.
1418 */
1419 engine = active_engine(ce);
1420
1421 /* First attempt to gracefully cancel the context */
1422 if (engine && !__cancel_engine(engine) && (exit || !persistent))
1423 /*
1424 * If we are unable to send a preemptive pulse to bump
1425 * the context from the GPU, we have to resort to a full
1426 * reset. We hope the collateral damage is worth it.
1427 */
1428 __reset_context(ctx: engines->ctx, engine);
1429 }
1430}
1431
1432static void kill_context(struct i915_gem_context *ctx)
1433{
1434 struct i915_gem_engines *pos, *next;
1435
1436 spin_lock_irq(lock: &ctx->stale.lock);
1437 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1438 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1439 if (!i915_sw_fence_await(fence: &pos->fence)) {
1440 list_del_init(entry: &pos->link);
1441 continue;
1442 }
1443
1444 spin_unlock_irq(lock: &ctx->stale.lock);
1445
1446 kill_engines(engines: pos, exit: !ctx->i915->params.enable_hangcheck,
1447 persistent: i915_gem_context_is_persistent(ctx));
1448
1449 spin_lock_irq(lock: &ctx->stale.lock);
1450 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
1451 list_safe_reset_next(pos, next, link);
1452 list_del_init(entry: &pos->link); /* decouple from FENCE_COMPLETE */
1453
1454 i915_sw_fence_complete(fence: &pos->fence);
1455 }
1456 spin_unlock_irq(lock: &ctx->stale.lock);
1457}
1458
1459static void engines_idle_release(struct i915_gem_context *ctx,
1460 struct i915_gem_engines *engines)
1461{
1462 struct i915_gem_engines_iter it;
1463 struct intel_context *ce;
1464
1465 INIT_LIST_HEAD(list: &engines->link);
1466
1467 engines->ctx = i915_gem_context_get(ctx);
1468
1469 for_each_gem_engine(ce, engines, it) {
1470 int err;
1471
1472 /* serialises with execbuf */
1473 intel_context_close(ce);
1474 if (!intel_context_pin_if_active(ce))
1475 continue;
1476
1477 /* Wait until context is finally scheduled out and retired */
1478 err = i915_sw_fence_await_active(fence: &engines->fence,
1479 ref: &ce->active,
1480 I915_ACTIVE_AWAIT_BARRIER);
1481 intel_context_unpin(ce);
1482 if (err)
1483 goto kill;
1484 }
1485
1486 spin_lock_irq(lock: &ctx->stale.lock);
1487 if (!i915_gem_context_is_closed(ctx))
1488 list_add_tail(new: &engines->link, head: &ctx->stale.engines);
1489 spin_unlock_irq(lock: &ctx->stale.lock);
1490
1491kill:
1492 if (list_empty(head: &engines->link)) /* raced, already closed */
1493 kill_engines(engines, exit: true,
1494 persistent: i915_gem_context_is_persistent(ctx));
1495
1496 i915_sw_fence_commit(fence: &engines->fence);
1497}
1498
1499static void set_closed_name(struct i915_gem_context *ctx)
1500{
1501 char *s;
1502
1503 /* Replace '[]' with '<>' to indicate closed in debug prints */
1504
1505 s = strrchr(ctx->name, '[');
1506 if (!s)
1507 return;
1508
1509 *s = '<';
1510
1511 s = strchr(s + 1, ']');
1512 if (s)
1513 *s = '>';
1514}
1515
1516static void context_close(struct i915_gem_context *ctx)
1517{
1518 struct i915_drm_client *client;
1519
1520 /* Flush any concurrent set_engines() */
1521 mutex_lock(lock: &ctx->engines_mutex);
1522 unpin_engines(e: __context_engines_static(ctx));
1523 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1524 i915_gem_context_set_closed(ctx);
1525 mutex_unlock(lock: &ctx->engines_mutex);
1526
1527 mutex_lock(lock: &ctx->mutex);
1528
1529 set_closed_name(ctx);
1530
1531 /*
1532 * The LUT uses the VMA as a backpointer to unref the object,
1533 * so we need to clear the LUT before we close all the VMA (inside
1534 * the ppgtt).
1535 */
1536 lut_close(ctx);
1537
1538 ctx->file_priv = ERR_PTR(error: -EBADF);
1539
1540 client = ctx->client;
1541 if (client) {
1542 spin_lock(lock: &client->ctx_lock);
1543 list_del_rcu(entry: &ctx->client_link);
1544 spin_unlock(lock: &client->ctx_lock);
1545 }
1546
1547 mutex_unlock(lock: &ctx->mutex);
1548
	/*
	 * If the user has disabled hangchecking, we cannot be sure that
	 * the batches will ever complete after the context is closed,
	 * keeping the context and all resources pinned forever. So in this
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
	 */
1556 kill_context(ctx);
1557
1558 i915_gem_context_put(ctx);
1559}
1560
1561static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1562{
1563 if (i915_gem_context_is_persistent(ctx) == state)
1564 return 0;
1565
1566 if (state) {
1567 /*
1568 * Only contexts that are short-lived [that will expire or be
1569 * reset] are allowed to survive past termination. We require
1570 * hangcheck to ensure that the persistent requests are healthy.
1571 */
1572 if (!ctx->i915->params.enable_hangcheck)
1573 return -EINVAL;
1574
1575 i915_gem_context_set_persistence(ctx);
1576 } else {
1577 /* To cancel a context we use "preempt-to-idle" */
1578 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1579 return -ENODEV;
1580
1581 /*
1582 * If the cancel fails, we then need to reset, cleanly!
1583 *
1584 * If the per-engine reset fails, all hope is lost! We resort
1585 * to a full GPU reset in that unlikely case, but realistically
1586 * if the engine could not reset, the full reset does not fare
1587 * much better. The damage has been done.
1588 *
1589 * However, if we cannot reset an engine by itself, we cannot
1590 * cleanup a hanging persistent context without causing
1591 * collateral damage, and we should not pretend we can by
1592 * exposing the interface.
1593 */
1594 if (!intel_has_reset_engine(gt: to_gt(i915: ctx->i915)))
1595 return -ENODEV;
1596
1597 i915_gem_context_clear_persistence(ctx);
1598 }
1599
1600 return 0;
1601}
1602
1603static struct i915_gem_context *
1604i915_gem_create_context(struct drm_i915_private *i915,
1605 const struct i915_gem_proto_context *pc)
1606{
1607 struct i915_gem_context *ctx;
1608 struct i915_address_space *vm = NULL;
1609 struct i915_gem_engines *e;
1610 int err;
1611 int i;
1612
1613 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1614 if (!ctx)
1615 return ERR_PTR(error: -ENOMEM);
1616
1617 kref_init(kref: &ctx->ref);
1618 ctx->i915 = i915;
1619 ctx->sched = pc->sched;
1620 mutex_init(&ctx->mutex);
1621 INIT_LIST_HEAD(list: &ctx->link);
1622 INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
1623
1624 spin_lock_init(&ctx->stale.lock);
1625 INIT_LIST_HEAD(list: &ctx->stale.engines);
1626
1627 if (pc->vm) {
1628 vm = i915_vm_get(vm: pc->vm);
1629 } else if (HAS_FULL_PPGTT(i915)) {
1630 struct i915_ppgtt *ppgtt;
1631
1632 ppgtt = i915_ppgtt_create(gt: to_gt(i915), lmem_pt_obj_flags: 0);
1633 if (IS_ERR(ptr: ppgtt)) {
1634 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
1635 PTR_ERR(ppgtt));
1636 err = PTR_ERR(ptr: ppgtt);
1637 goto err_ctx;
1638 }
1639 ppgtt->vm.fpriv = pc->fpriv;
1640 vm = &ppgtt->vm;
1641 }
1642 if (vm)
1643 ctx->vm = vm;
1644
1645 /* Assign early so intel_context_set_gem can access these flags */
1646 ctx->user_flags = pc->user_flags;
1647
1648 mutex_init(&ctx->engines_mutex);
1649 if (pc->num_user_engines >= 0) {
1650 i915_gem_context_set_user_engines(ctx);
1651 e = user_engines(ctx, num_engines: pc->num_user_engines, pe: pc->user_engines);
1652 } else {
1653 i915_gem_context_clear_user_engines(ctx);
1654 e = default_engines(ctx, rcs_sseu: pc->legacy_rcs_sseu);
1655 }
1656 if (IS_ERR(ptr: e)) {
1657 err = PTR_ERR(ptr: e);
1658 goto err_vm;
1659 }
1660 RCU_INIT_POINTER(ctx->engines, e);
1661
1662 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1663 mutex_init(&ctx->lut_mutex);
1664
	/*
	 * NB: Mark all slices as needing a remap so that when the context
	 * first loads it will restore whatever remap state already exists.
	 * If there is no remap info, it will be a NOP.
	 */
1668 ctx->remap_slice = ALL_L3_SLICES(i915);
1669
1670 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1671 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1672
1673 if (pc->single_timeline) {
1674 err = drm_syncobj_create(out_syncobj: &ctx->syncobj,
1675 DRM_SYNCOBJ_CREATE_SIGNALED,
1676 NULL);
1677 if (err)
1678 goto err_engines;
1679 }
1680
1681 if (pc->uses_protected_content) {
1682 ctx->pxp_wakeref = intel_runtime_pm_get(rpm: &i915->runtime_pm);
1683 ctx->uses_protected_content = true;
1684 }
1685
1686 trace_i915_context_create(ctx);
1687
1688 return ctx;
1689
1690err_engines:
1691 free_engines(e);
1692err_vm:
1693 if (ctx->vm)
1694 i915_vm_put(vm: ctx->vm);
1695err_ctx:
1696 kfree(objp: ctx);
1697 return ERR_PTR(error: err);
1698}
1699
1700static void init_contexts(struct i915_gem_contexts *gc)
1701{
1702 spin_lock_init(&gc->lock);
1703 INIT_LIST_HEAD(list: &gc->list);
1704}
1705
1706void i915_gem_init__contexts(struct drm_i915_private *i915)
1707{
1708 init_contexts(gc: &i915->gem.contexts);
1709}
1710
1711/*
1712 * Note that this implicitly consumes the ctx reference, by placing
1713 * the ctx in the context_xa.
1714 */
1715static void gem_context_register(struct i915_gem_context *ctx,
1716 struct drm_i915_file_private *fpriv,
1717 u32 id)
1718{
1719 struct drm_i915_private *i915 = ctx->i915;
1720 void *old;
1721
1722 ctx->file_priv = fpriv;
1723
1724 ctx->pid = get_task_pid(current, type: PIDTYPE_PID);
1725 ctx->client = i915_drm_client_get(client: fpriv->client);
1726
1727 snprintf(buf: ctx->name, size: sizeof(ctx->name), fmt: "%s[%d]",
1728 current->comm, pid_nr(pid: ctx->pid));
1729
1730 spin_lock(lock: &ctx->client->ctx_lock);
1731 list_add_tail_rcu(new: &ctx->client_link, head: &ctx->client->ctx_list);
1732 spin_unlock(lock: &ctx->client->ctx_lock);
1733
1734 spin_lock(lock: &i915->gem.contexts.lock);
1735 list_add_tail(new: &ctx->link, head: &i915->gem.contexts.list);
1736 spin_unlock(lock: &i915->gem.contexts.lock);
1737
1738 /* And finally expose ourselves to userspace via the idr */
1739 old = xa_store(&fpriv->context_xa, index: id, entry: ctx, GFP_KERNEL);
1740 WARN_ON(old);
1741}
1742
1743int i915_gem_context_open(struct drm_i915_private *i915,
1744 struct drm_file *file)
1745{
1746 struct drm_i915_file_private *file_priv = file->driver_priv;
1747 struct i915_gem_proto_context *pc;
1748 struct i915_gem_context *ctx;
1749 int err;
1750
1751 mutex_init(&file_priv->proto_context_lock);
1752 xa_init_flags(xa: &file_priv->proto_context_xa, XA_FLAGS_ALLOC);
1753
1754 /* 0 reserved for the default context */
1755 xa_init_flags(xa: &file_priv->context_xa, XA_FLAGS_ALLOC1);
1756
1757 /* 0 reserved for invalid/unassigned ppgtt */
1758 xa_init_flags(xa: &file_priv->vm_xa, XA_FLAGS_ALLOC1);
1759
1760 pc = proto_context_create(fpriv: file_priv, i915, flags: 0);
1761 if (IS_ERR(ptr: pc)) {
1762 err = PTR_ERR(ptr: pc);
1763 goto err;
1764 }
1765
1766 ctx = i915_gem_create_context(i915, pc);
1767 proto_context_close(i915, pc);
1768 if (IS_ERR(ptr: ctx)) {
1769 err = PTR_ERR(ptr: ctx);
1770 goto err;
1771 }
1772
1773 gem_context_register(ctx, fpriv: file_priv, id: 0);
1774
1775 return 0;
1776
1777err:
1778 xa_destroy(&file_priv->vm_xa);
1779 xa_destroy(&file_priv->context_xa);
1780 xa_destroy(&file_priv->proto_context_xa);
1781 mutex_destroy(lock: &file_priv->proto_context_lock);
1782 return err;
1783}
1784
1785void i915_gem_context_close(struct drm_file *file)
1786{
1787 struct drm_i915_file_private *file_priv = file->driver_priv;
1788 struct i915_gem_proto_context *pc;
1789 struct i915_address_space *vm;
1790 struct i915_gem_context *ctx;
1791 unsigned long idx;
1792
1793 xa_for_each(&file_priv->proto_context_xa, idx, pc)
1794 proto_context_close(i915: file_priv->i915, pc);
1795 xa_destroy(&file_priv->proto_context_xa);
1796 mutex_destroy(lock: &file_priv->proto_context_lock);
1797
1798 xa_for_each(&file_priv->context_xa, idx, ctx)
1799 context_close(ctx);
1800 xa_destroy(&file_priv->context_xa);
1801
1802 xa_for_each(&file_priv->vm_xa, idx, vm)
1803 i915_vm_put(vm);
1804 xa_destroy(&file_priv->vm_xa);
1805}
1806
1807int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1808 struct drm_file *file)
1809{
1810 struct drm_i915_private *i915 = to_i915(dev);
1811 struct drm_i915_gem_vm_control *args = data;
1812 struct drm_i915_file_private *file_priv = file->driver_priv;
1813 struct i915_ppgtt *ppgtt;
1814 u32 id;
1815 int err;
1816
1817 if (!HAS_FULL_PPGTT(i915))
1818 return -ENODEV;
1819
1820 if (args->flags)
1821 return -EINVAL;
1822
1823 ppgtt = i915_ppgtt_create(gt: to_gt(i915), lmem_pt_obj_flags: 0);
1824 if (IS_ERR(ptr: ppgtt))
1825 return PTR_ERR(ptr: ppgtt);
1826
1827 if (args->extensions) {
1828 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1829 NULL, count: 0,
1830 data: ppgtt);
1831 if (err)
1832 goto err_put;
1833 }
1834
1835 err = xa_alloc(xa: &file_priv->vm_xa, id: &id, entry: &ppgtt->vm,
1836 xa_limit_32b, GFP_KERNEL);
1837 if (err)
1838 goto err_put;
1839
1840 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1841 args->vm_id = id;
1842 ppgtt->vm.fpriv = file_priv;
1843 return 0;
1844
1845err_put:
1846 i915_vm_put(vm: &ppgtt->vm);
1847 return err;
1848}
1849
1850int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1851 struct drm_file *file)
1852{
1853 struct drm_i915_file_private *file_priv = file->driver_priv;
1854 struct drm_i915_gem_vm_control *args = data;
1855 struct i915_address_space *vm;
1856
1857 if (args->flags)
1858 return -EINVAL;
1859
1860 if (args->extensions)
1861 return -EINVAL;
1862
1863 vm = xa_erase(&file_priv->vm_xa, index: args->vm_id);
1864 if (!vm)
1865 return -ENOENT;
1866
1867 i915_vm_put(vm);
1868 return 0;
1869}
1870
1871static int get_ppgtt(struct drm_i915_file_private *file_priv,
1872 struct i915_gem_context *ctx,
1873 struct drm_i915_gem_context_param *args)
1874{
1875 struct i915_address_space *vm;
1876 int err;
1877 u32 id;
1878
1879 if (!i915_gem_context_has_full_ppgtt(ctx))
1880 return -ENODEV;
1881
1882 vm = ctx->vm;
1883 GEM_BUG_ON(!vm);
1884
1885 /*
1886 * Get a reference for the allocated handle. Once the handle is
1887 * visible in the vm_xa table, userspace could try to close it
1888 * from under our feet, so we need to hold the extra reference
1889 * first.
1890 */
1891 i915_vm_get(vm);
1892
1893 err = xa_alloc(xa: &file_priv->vm_xa, id: &id, entry: vm, xa_limit_32b, GFP_KERNEL);
1894 if (err) {
1895 i915_vm_put(vm);
1896 return err;
1897 }
1898
1899 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1900 args->value = id;
1901 args->size = 0;
1902
1903 return err;
1904}
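
/*
 * The userspace-visible effect (illustrative): querying the VM of an
 * existing context hands back a new vm_id that references the same ppgtt
 * and can later be passed to VM_DESTROY or to another context:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_VM,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	// p.value now holds the new vm_id
 */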
1905
1906int
1907i915_gem_user_to_context_sseu(struct intel_gt *gt,
1908 const struct drm_i915_gem_context_param_sseu *user,
1909 struct intel_sseu *context)
1910{
1911 const struct sseu_dev_info *device = &gt->info.sseu;
1912 struct drm_i915_private *i915 = gt->i915;
1913 unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(sseu: device, slice: 0);
1914
1915 /* No zeros in any field. */
1916 if (!user->slice_mask || !user->subslice_mask ||
1917 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1918 return -EINVAL;
1919
1920 /* Max > min. */
1921 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1922 return -EINVAL;
1923
1924 /*
1925 * Some future proofing on the types since the uAPI is wider than the
1926 * current internal implementation.
1927 */
1928 if (overflows_type(user->slice_mask, context->slice_mask) ||
1929 overflows_type(user->subslice_mask, context->subslice_mask) ||
1930 overflows_type(user->min_eus_per_subslice,
1931 context->min_eus_per_subslice) ||
1932 overflows_type(user->max_eus_per_subslice,
1933 context->max_eus_per_subslice))
1934 return -EINVAL;
1935
1936 /* Check validity against hardware. */
1937 if (user->slice_mask & ~device->slice_mask)
1938 return -EINVAL;
1939
1940 if (user->subslice_mask & ~dev_subslice_mask)
1941 return -EINVAL;
1942
1943 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1944 return -EINVAL;
1945
1946 context->slice_mask = user->slice_mask;
1947 context->subslice_mask = user->subslice_mask;
1948 context->min_eus_per_subslice = user->min_eus_per_subslice;
1949 context->max_eus_per_subslice = user->max_eus_per_subslice;
1950
1951 /* Part specific restrictions. */
1952 if (GRAPHICS_VER(i915) == 11) {
1953 unsigned int hw_s = hweight8(device->slice_mask);
1954 unsigned int hw_ss_per_s = hweight8(dev_subslice_mask);
1955 unsigned int req_s = hweight8(context->slice_mask);
1956 unsigned int req_ss = hweight8(context->subslice_mask);
1957
1958 /*
1959 * Only full subslice enablement is possible if more than one
1960 * slice is turned on.
1961 */
1962 if (req_s > 1 && req_ss != hw_ss_per_s)
1963 return -EINVAL;
1964
1965 /*
1966 * If more than four (SScount bitfield limit) subslices are
1967 * requested then the number has to be even.
1968 */
1969 if (req_ss > 4 && (req_ss & 1))
1970 return -EINVAL;
1971
		/*
		 * If only one slice is enabled and subslice count is below the
		 * device full enablement, it must be at most half of all the
		 * available subslices.
		 */
1977 if (req_s == 1 && req_ss < hw_ss_per_s &&
1978 req_ss > (hw_ss_per_s / 2))
1979 return -EINVAL;
1980
1981 /* ABI restriction - VME use case only. */
1982
1983 /* All slices or one slice only. */
1984 if (req_s != 1 && req_s != hw_s)
1985 return -EINVAL;
1986
1987 /*
1988 * Half subslices or full enablement only when one slice is
1989 * enabled.
1990 */
1991 if (req_s == 1 &&
1992 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1993 return -EINVAL;
1994
1995 /* No EU configuration changes. */
1996 if ((user->min_eus_per_subslice !=
1997 device->max_eus_per_subslice) ||
1998 (user->max_eus_per_subslice !=
1999 device->max_eus_per_subslice))
2000 return -EINVAL;
2001 }
2002
2003 return 0;
2004}
2005
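/*
 * Setter for I915_CONTEXT_PARAM_SSEU: copy the uAPI struct in, resolve the
 * target engine (only the render engine supports RPCS reconfiguration),
 * validate the request and apply it to that engine's context.
 */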
static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	struct intel_sseu sseu;
	unsigned long lookup;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (GRAPHICS_VER(i915) != 11)
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Only render engine supports RPCS configuration. */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}

static int
set_persistence(struct i915_gem_context *ctx,
		const struct drm_i915_gem_context_param *args)
{
	if (args->size)
		return -EINVAL;

	return __context_set_persistence(ctx, args->value);
}

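/*
 * Setter for I915_CONTEXT_PARAM_PRIORITY: record the new scheduling priority
 * and, on engines that support timeslicing, only allow semaphore busy-waits
 * for normal-or-higher priority contexts.
 */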
static int set_priority(struct i915_gem_context *ctx,
			const struct drm_i915_gem_context_param *args)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err;

	err = validate_priority(ctx->i915, args);
	if (err)
		return err;

	ctx->sched.priority = args->value;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_has_timeslices(ce->engine))
			continue;

		if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
		    intel_engine_has_semaphores(ce->engine))
			intel_context_set_use_semaphores(ce);
		else
			intel_context_clear_use_semaphores(ce);
	}
	i915_gem_context_unlock_engines(ctx);

	return 0;
}

static int get_protected(struct i915_gem_context *ctx,
			 struct drm_i915_gem_context_param *args)
{
	args->size = 0;
	args->value = i915_gem_context_uses_protected_content(ctx);

	return 0;
}

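/*
 * Debug-only setter for I915_CONTEXT_PARAM_CONTEXT_IMAGE (requires
 * CONFIG_DRM_I915_REPLAY_GPU_HANGS_API and i915.enable_debug_only_api):
 * replace the default context image of one engine with a user-supplied one,
 * e.g. an image captured from a previous GPU hang that is to be replayed.
 */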
static int set_context_image(struct i915_gem_context *ctx,
			     struct drm_i915_gem_context_param *args)
{
	struct i915_gem_context_param_context_image user;
	struct intel_context *ce;
	struct file *shmem_state;
	unsigned long lookup;
	void *state;
	int ret = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_REPLAY_GPU_HANGS_API))
		return -EINVAL;

	if (!ctx->i915->params.enable_debug_only_api)
		return -EINVAL;

	if (args->size < sizeof(user))
		return -EINVAL;

	if (copy_from_user(&user, u64_to_user_ptr(args->value), sizeof(user)))
		return -EFAULT;

	if (user.mbz)
		return -EINVAL;

	if (user.flags & ~(I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user.flags & I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (user.size < ce->engine->context_size) {
		ret = -EINVAL;
		goto out_ce;
	}

	if (drm_WARN_ON_ONCE(&ctx->i915->drm,
			     test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		/*
		 * This is racy but for a debug only API, if userspace is keen
		 * to create and configure contexts, while simultaneously using
		 * them from a second thread, let them suffer by potentially not
		 * executing with the context image they just raced to apply.
		 */
		ret = -EBUSY;
		goto out_ce;
	}

	state = memdup_user(u64_to_user_ptr(user.image), ce->engine->context_size);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		goto out_ce;
	}

	shmem_state = shmem_create_from_data(ce->engine->name,
					     state, ce->engine->context_size);
	if (IS_ERR(shmem_state)) {
		ret = PTR_ERR(shmem_state);
		goto out_state;
	}

	if (intel_context_set_own_state(ce)) {
		ret = -EBUSY;
		fput(shmem_state);
		goto out_state;
	}

	ce->default_state = shmem_state;

	args->size = sizeof(user);

out_state:
	kfree(state);
out_ce:
	intel_context_put(ce);
	return ret;
}

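/*
 * Apply a single parameter to an already finalized context. Creation-time-only
 * parameters such as the VM and the engine set are rejected here and must be
 * supplied via the proto-context (context create) path instead.
 */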
static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else if (i915_gem_context_uses_protected_content(ctx))
			ret = -EPERM; /* can't clear this for protected contexts */
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!args->value)
			i915_gem_context_clear_recoverable(ctx);
		else if (i915_gem_context_uses_protected_content(ctx))
			ret = -EPERM; /* can't set this for protected contexts */
		else
			i915_gem_context_set_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = set_priority(ctx, args);
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		ret = set_persistence(ctx, args);
		break;

	case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
		ret = set_context_image(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	case I915_CONTEXT_PARAM_VM:
	case I915_CONTEXT_PARAM_ENGINES:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

struct create_ext {
	struct i915_gem_proto_context *pc;
	struct drm_i915_file_private *fpriv;
};

static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
}

static int invalid_ext(struct i915_user_extension __user *ext, void *data)
{
	return -EINVAL;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

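/*
 * Look up a finalized context by id under RCU, taking a reference only if the
 * context has not already dropped to a zero refcount. Returns NULL if no live
 * context is registered under this id.
 */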
static inline struct i915_gem_context *
__context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = xa_load(&file_priv->context_xa, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}

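/*
 * Turn a proto-context into a real context. Called with the proto-context
 * lock held, this creates the context, registers it under the same id and
 * then drops the now-unused proto-context from proto_context_xa.
 */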
static struct i915_gem_context *
finalize_create_context_locked(struct drm_i915_file_private *file_priv,
			       struct i915_gem_proto_context *pc, u32 id)
{
	struct i915_gem_context *ctx;
	void *old;

	lockdep_assert_held(&file_priv->proto_context_lock);

	ctx = i915_gem_create_context(file_priv->i915, pc);
	if (IS_ERR(ctx))
		return ctx;

	/*
	 * One for the xarray and one for the caller. We need to grab
	 * the reference *prior* to making the ctx visible to userspace
	 * in gem_context_register(), as at any point after that
	 * userspace can try to race us with another thread destroying
	 * the context under our feet.
	 */
	i915_gem_context_get(ctx);

	gem_context_register(ctx, file_priv, id);

	old = xa_erase(&file_priv->proto_context_xa, id);
	GEM_BUG_ON(old != pc);
	proto_context_close(file_priv->i915, pc);

	return ctx;
}

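/*
 * Look up a context by id for this file. If only a proto-context is
 * registered under the id, it is finalized into a full context here, on
 * first use. Returns a referenced context or an ERR_PTR on failure.
 */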
struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;

	ctx = __context_lookup(file_priv, id);
	if (ctx)
		return ctx;

	mutex_lock(&file_priv->proto_context_lock);
	/* Try one more time under the lock */
	ctx = __context_lookup(file_priv, id);
	if (!ctx) {
		pc = xa_load(&file_priv->proto_context_xa, id);
		if (!pc)
			ctx = ERR_PTR(-ENOENT);
		else
			ctx = finalize_create_context_locked(file_priv, pc, id);
	}
	mutex_unlock(&file_priv->proto_context_lock);

	return ctx;
}

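/*
 * DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT handler. On graphics version 13 and
 * newer the context is fully created here; on older parts only a
 * proto-context is registered and finalization is deferred until first use.
 *
 * A rough userspace sketch (illustrative only, error handling omitted):
 *
 *	struct drm_i915_gem_context_create_ext arg = {};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &arg);
 *
 * after which arg.ctx_id holds the id of the new context.
 */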
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(to_gt(i915));
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.pc = proto_context_create(file->driver_priv, i915,
					   args->flags);
	if (IS_ERR(ext_data.pc))
		return PTR_ERR(ext_data.pc);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_pc;
	}

	if (GRAPHICS_VER(i915) > 12) {
		struct i915_gem_context *ctx;

		/* Get ourselves a context ID */
		ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
			       xa_limit_32b, GFP_KERNEL);
		if (ret)
			goto err_pc;

		ctx = i915_gem_create_context(i915, ext_data.pc);
		if (IS_ERR(ctx)) {
			ret = PTR_ERR(ctx);
			goto err_pc;
		}

		proto_context_close(i915, ext_data.pc);
		gem_context_register(ctx, ext_data.fpriv, id);
	} else {
		ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
		if (ret < 0)
			goto err_pc;
	}

	args->ctx_id = id;

	return 0;

err_pc:
	proto_context_close(i915, ext_data.pc);
	return ret;
}

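/*
 * DRM_IOCTL_I915_GEM_CONTEXT_DESTROY handler: drop whichever of the finalized
 * context or the proto-context is registered under the id for this file and
 * close it. Outstanding GPU work keeps the underlying objects alive until it
 * completes.
 */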
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	/*
	 * We need to hold the proto-context lock here to prevent races
	 * with finalize_create_context_locked().
	 */
	mutex_lock(&file_priv->proto_context_lock);
	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
	mutex_unlock(&file_priv->proto_context_lock);

	if (!ctx && !pc)
		return -ENOENT;
	GEM_WARN_ON(ctx && pc);

	if (pc)
		proto_context_close(file_priv->i915, pc);

	if (ctx)
		context_close(ctx);

	return 0;
}

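/*
 * Report the current SSEU configuration of one engine in the context back to
 * userspace. With args->size == 0 only the expected struct size is returned
 * so userspace can size its buffer.
 */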
static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (args->param) {
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		vm = i915_gem_context_get_eb_vm(ctx);
		args->value = vm->total;
		i915_vm_put(vm);
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = get_protected(ctx, args);
		break;

	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_ENGINES:
	case I915_CONTEXT_PARAM_RINGSIZE:
	case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

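/*
 * DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM handler. Parameters targeting a not yet
 * finalized proto-context (only possible before graphics version 13) are
 * applied via set_proto_ctx_param() under the proto-context lock; everything
 * else goes through ctx_setparam() on the live context.
 */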
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;
	int ret = 0;

	mutex_lock(&file_priv->proto_context_lock);
	ctx = __context_lookup(file_priv, args->ctx_id);
	if (!ctx) {
		pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
		if (pc) {
			/*
			 * Contexts should be finalized inside
			 * GEM_CONTEXT_CREATE starting with graphics
			 * version 13.
			 */
			WARN_ON(GRAPHICS_VER(file_priv->i915) > 12);
			ret = set_proto_ctx_param(file_priv, pc, args);
		} else {
			ret = -ENOENT;
		}
	}
	mutex_unlock(&file_priv->proto_context_lock);

	if (ctx) {
		ret = ctx_setparam(file_priv, ctx, args);
		i915_gem_context_put(ctx);
	}

	return ret;
}

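/*
 * DRM_IOCTL_I915_GET_RESET_STATS handler: report how many times this context
 * was blamed for a GPU hang (batch_active) or merely active during one
 * (batch_pending), plus the global reset count for CAP_SYS_ADMIN callers.
 */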
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;

	if (args->flags || args->pad)
		return -EINVAL;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	i915_gem_context_put(ctx);
	return 0;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	if (unlikely(!e))
		return NULL;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

void i915_gem_context_module_exit(void)
{
	kmem_cache_destroy(slab_luts);
}

int __init i915_gem_context_module_init(void)
{
	slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!slab_luts)
		return -ENOMEM;

	if (IS_ENABLED(CONFIG_DRM_I915_REPLAY_GPU_HANGS_API)) {
		pr_notice("**************************************************************\n");
		pr_notice("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE     **\n");
		pr_notice("**                                                          **\n");
		if (i915_modparams.enable_debug_only_api)
			pr_notice("** i915.enable_debug_only_api is intended to be set        **\n");
		else
			pr_notice("** CONFIG_DRM_I915_REPLAY_GPU_HANGS_API builds are intended **\n");
		pr_notice("** for specific userspace graphics stack developers only!  **\n");
		pr_notice("**                                                          **\n");
		pr_notice("** If you are seeing this message please report this to the **\n");
		pr_notice("** provider of your kernel build.                           **\n");
		pr_notice("**                                                          **\n");
		pr_notice("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE     **\n");
		pr_notice("**************************************************************\n");
	}

	return 0;
}