// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "i915_timer_util.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

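/*
 * Per-engine sysfs node: a wrapper kobject from which the attribute
 * callbacks below recover their engine via container_of().
 */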
struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static const struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static const struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static const struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static const struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);

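/*
 * Illustrative usage only (the card index and engine names vary by
 * platform):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/name
 *   rcs0
 *   $ cat /sys/class/drm/card0/engine/rcs0/class
 *   0
 */
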
static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
	/* Trim off the trailing space and replace with a newline */
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	if (len > 0)
		buf[len - 1] = '\n';

	return len;
}

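/*
 * Emit each set capability bit as its name for the engine's class; with
 * show_unknown, bits that have no name are printed as their hex index
 * (e.g. "[4]") so new hardware capabilities remain visible.
 */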
static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    unsigned long caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_LONG);

	len = 0;
	for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
		if (n >= count || !repr[n]) {
			if (GEM_WARN_ON(show_unknown))
				len += sysfs_emit_at(buf, len, "[%x] ", n);
		} else {
			len += sysfs_emit_at(buf, len, "%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static const struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static const struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);

static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * When waiting for a request, if it is currently being executed
	 * on the GPU, we busywait for a short while before sleeping. The
	 * premise is that most requests are short, and if it is already
	 * executing then there is a good chance that it will complete
	 * before we can set up the interrupt handler and go to sleep.
	 * We try to offset the cost of going to sleep, by first spinning
	 * on the request -- if it completed in less time than it would take
	 * to go to sleep, process the interrupt and return back to the
	 * client, then we have saved the client some latency, albeit at
	 * the cost of spinning on an expensive CPU core.
	 *
	 * While we try to avoid waiting at all for a request that is unlikely
	 * to complete, deciding how long it is worth spinning for is an
	 * arbitrary decision: trading off power vs latency.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

	return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static const struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);

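/*
 * Illustrative only: a write outside the clamped range is rejected with
 * -EINVAL rather than silently truncated, e.g.
 *
 *   # echo 100000 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 */
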
static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static const struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Execlists uses a scheduling quantum (a timeslice) to alternate
	 * execution between ready-to-run contexts of equal priority. This
	 * ensures that all users (though only if they are of equal
	 * importance) have the opportunity to run and prevents livelocks
	 * where contexts may have implicit ordering due to userspace
	 * semaphores.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_timeslice_duration_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

	if (execlists_active(&engine->execlists))
		set_timer_ms(&engine->execlists.timer, duration);

	return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);

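/*
 * Illustrative only: a new quantum is applied immediately, reprogramming
 * the timer even for the contexts currently in flight, e.g.
 *
 *   # echo 1 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
 */
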
static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
	   const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Allowing ourselves to sleep before a GPU reset after disabling
	 * submission, even for a few milliseconds, gives an innocent context
	 * the opportunity to clear the GPU before the reset occurs. However,
	 * how long to sleep depends on the typical non-preemptible duration
	 * (a similar problem to determining the ideal preempt-reset timeout
	 * or even the heartbeat interval).
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_stop_timeout_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
	return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static const struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);

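/*
 * Illustrative only: lengthen the grace period granted before a reset
 * once submission has been disabled, e.g.
 *
 *   # echo 100 > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 */
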
static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static const struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long timeout, clamped;
	int err;

	/*
	 * After initialising a preemption request, we give the current
	 * resident a small amount of time to vacate the GPU. The preemption
	 * request is for a higher priority context and should be immediate to
	 * maintain high quality of service (and avoid priority inversion).
	 * However, the preemption granularity of the GPU can be quite coarse
	 * and so we need a compromise.
	 */

	err = kstrtoull(buf, 0, &timeout);
	if (err)
		return err;

	clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
	if (timeout != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

	if (READ_ONCE(engine->execlists.pending[0]))
		set_timer_ms(&engine->execlists.preempt, timeout);

	return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
		     char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);

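/*
 * Illustrative only: a new timeout is applied even to a preemption
 * already in flight (the pending[] check above), e.g.
 *
 *   # echo 640 > /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
 */
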
static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);

static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long delay, clamped;
	int err;

	/*
	 * We monitor the health of the system via periodic heartbeat pulses.
	 * The pulses also provide the opportunity to perform garbage
	 * collection. However, we interpret an incomplete pulse (a missed
	 * heartbeat) as an indication that the system is no longer responsive,
	 * i.e. hung, and perform an engine or full GPU reset. Given that the
	 * preemption granularity can be very coarse on a system, the optimal
	 * value for any workload is unknowable!
	 */

	err = kstrtoull(buf, 0, &delay);
	if (err)
		return err;

	clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
	if (delay != clamped)
		return -EINVAL;

	err = intel_engine_set_heartbeat(engine, delay);
	if (err)
		return err;

	return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static const struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);

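/*
 * Illustrative only: writing 0 asks intel_engine_set_heartbeat() to
 * disable the periodic pulse altogether, e.g.
 *
 *   # echo 0 > /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 */
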
static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static const struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);

static void kobj_engine_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct kobj_type kobj_engine_type = {
	.release = kobj_engine_release,
	.sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return NULL;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = engine;

	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
		kobject_put(&ke->base);
		return NULL;
	}

	/* xfer ownership to sysfs tree */
	return &ke->base;
}

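/*
 * Populate a read-only ".defaults" subdirectory beneath the engine's
 * node, mirroring the tunable attributes with their original values so
 * that userspace can restore an engine after experimenting.
 */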
static void add_defaults(struct kobj_engine *parent)
{
	static const struct attribute * const files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	if (sysfs_create_files(&ke->base, files))
		return;

	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}

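/*
 * Expose each uabi engine under <drm-card>/engine/<name>/, adding the
 * timeslice and preempt-timeout attributes only where the engine
 * supports those features.
 */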
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
	static const struct attribute * const files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

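		/*
		 * The if (0) keeps the error unwind out of the normal flow;
		 * it is only reachable via the goto labels above.
		 */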
		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_warn(kdev, "Failed to add sysfs engine '%s'\n",
				 engine->name);
		}
	}
}