/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/string_helpers.h>

#include <drm/drm_debugfs.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_debugfs.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_pm_debugfs.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_rc6.h"
#include "gt/intel_reset.h"
#include "gt/intel_rps.h"
#include "gt/intel_sseu_debugfs.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_driver.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_scheduler.h"
#include "i915_wait_util.h"
#include "intel_mchbar_regs.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
	i915_print_iommu_status(i915, &p);
	intel_gt_info_print(&to_gt(i915)->info, &p);
	intel_driver_caps_print(&i915->caps, &p);

	i915_params_dump(&i915->params, &p);

	return 0;
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

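/*
 * Pretty-print a mask of GTT page sizes: a single size maps to a static
 * string, while a mixed mask is formatted into the caller-provided buffer.
 */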
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x - 2] = '\0';

		return buf;
	}
}

static const char *stringify_vma_type(const struct i915_vma *vma)
{
	if (i915_vma_is_ggtt(vma))
		return "ggtt";

	if (i915_vma_is_dpt(vma))
		return "dpt";

	return "ppgtt";
}

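/*
 * Decode obj->pat_index into a human-readable cache description; the
 * meaning of each PAT index depends on the graphics IP version.
 */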
static const char *i915_cache_level_str(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);

	if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 74))) {
		switch (obj->pat_index) {
		case 0: return " WB";
		case 1: return " WT";
		case 2: return " UC";
		case 3: return " WB (1-Way Coh)";
		case 4: return " WB (2-Way Coh)";
		default: return " not defined";
		}
	} else if (GRAPHICS_VER(i915) >= 12) {
		switch (obj->pat_index) {
		case 0: return " WB";
		case 1: return " WC";
		case 2: return " WT";
		case 3: return " UC";
		default: return " not defined";
		}
	} else {
		switch (obj->pat_index) {
		case 0: return " UC";
		case 1: return HAS_LLC(i915) ?
			       " LLC" : " snooped";
		case 2: return " L3+LLC";
		case 3: return " WT";
		default: return " not defined";
		}
	}
}

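/*
 * Print a one-line summary of a GEM object followed by every VMA that is
 * currently allocated for it. Note that obj->vma.lock is dropped around
 * the printing of each VMA, so the list may change underneath us; this is
 * best-effort debug output.
 */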
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(obj),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
			   stringify_vma_type(vma),
			   i915_vma_offset(vma), i915_vma_size(vma),
			   stringify_page_sizes(vma->resource->page_sizes_gtt,
						NULL, 0));
		if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
			switch (vma->gtt_view.type) {
			case I915_GTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->gtt_view.partial.offset << PAGE_SHIFT,
					   vma->gtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
					   vma->gtt_view.rotated.plane[0].width,
					   vma->gtt_view.rotated.plane[0].height,
					   vma->gtt_view.rotated.plane[0].src_stride,
					   vma->gtt_view.rotated.plane[0].dst_stride,
					   vma->gtt_view.rotated.plane[0].offset,
					   vma->gtt_view.rotated.plane[1].width,
					   vma->gtt_view.rotated.plane[1].height,
					   vma->gtt_view.rotated.plane[1].src_stride,
					   vma->gtt_view.rotated.plane[1].dst_stride,
					   vma->gtt_view.rotated.plane[1].offset);
				break;

			case I915_GTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
					   vma->gtt_view.remapped.plane[0].width,
					   vma->gtt_view.remapped.plane[0].height,
					   vma->gtt_view.remapped.plane[0].src_stride,
					   vma->gtt_view.remapped.plane[0].dst_stride,
					   vma->gtt_view.remapped.plane[0].offset,
					   vma->gtt_view.remapped.plane[1].width,
					   vma->gtt_view.remapped.plane[1].height,
					   vma->gtt_view.remapped.plane[1].src_stride,
					   vma->gtt_view.remapped.plane[1].dst_stride,
					   vma->gtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->gtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (i915_gem_object_is_stolen(obj))
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		intel_memory_region_debug(mr, &p);

	return 0;
}

static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = to_gt(i915);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_gt_pm_frequency_dump(gt, &p);

	return 0;
}

static const char *swizzle_string(unsigned int swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y));

	if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
	if (GRAPHICS_VER(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GRAPHICS_VER(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3_BW));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3_BW));
	} else if (GRAPHICS_VER(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (GRAPHICS_VER(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &to_gt(dev_priv)->rps;

	seq_printf(m, "RPS enabled? %s\n",
		   str_yes_no(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n",
		   str_yes_no(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s\n", str_yes_no(to_gt(dev_priv)->awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s\n", str_yes_no(!to_gt(dev_priv)->awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   str_yes_no(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}

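/*
 * Dump per-engine state for every user-visible engine, plus global GT
 * wakeref and command-streamer timestamp information.
 */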
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "GT awake? %s [%d], %llums\n",
		   str_yes_no(to_gt(i915)->awake),
		   atomic_read(&to_gt(i915)->wakeref.count),
		   ktime_to_ms(intel_gt_get_awake_time(to_gt(i915))));
	seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   to_gt(i915)->clock_frequency,
		   to_gt(i915)->clock_period_ns);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, i915)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_gt_show_timelines(to_gt(i915), &p, i915_request_show_with_schedule);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		const struct i915_wa_list *wal = &engine->ctx_wa_list;
		const struct i915_wa *wa;
		unsigned int count;

		count = wal->count;
		if (!count)
			continue;

		seq_printf(m, "%s: Workarounds applied: %u\n",
			   engine->name, count);

		for (wa = wal->list; count--; wa++)
			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
				   i915_mmio_reg_offset(wa->reg),
				   wa->set, wa->clr);

		seq_printf(m, "\n");
	}

	return 0;
}

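/*
 * i915_wedged reports and forces wedged (terminally hung) state across all
 * GTs: a read returns non-zero if at least one GT is wedged, and a write
 * requests a reset via the per-GT reset handler.
 */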
static int i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	struct intel_gt *gt;
	unsigned int i;

	*val = 0;

	for_each_gt(gt, i915, i) {
		int ret;

		ret = intel_gt_debugfs_reset_show(gt, val);
		if (ret)
			return ret;

		/* at least one tile should be wedged */
		if (*val)
			break;
	}

	return 0;
}

static int i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, i915, i)
		intel_gt_debugfs_reset_store(gt, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32bits.
	 */
	if (intel_gt_ns_to_clock_interval(to_gt(i915), val) > U32_MAX)
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}

static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");

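/*
 * Flag bits accepted by the i915_gem_drop_caches debugfs file; writing
 * DROP_ALL idles the GPU and releases every reclaimable cache.
 */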
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU	BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

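/* Apply the GT-scoped drop-caches flags (idle, retire, reset) to one GT. */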
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), 200))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	if (val & DROP_FREED)
		intel_gt_flush_buffer_pool(gt);

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_gt *gt;
	unsigned int flags;
	unsigned int i;
	int ret;

	drm_dbg(&i915->drm, "Dropping caches: 0x%08llx [0x%08llx]\n",
		val, val & DROP_ALL);

	for_each_gt(gt, i915, i) {
		ret = gt_drop_caches(gt, val);
		if (ret)
			return ret;
	}

	fs_reclaim_acquire(GFP_KERNEL);
	flags = memalloc_noreclaim_save();
	if (val & DROP_BOUND)
		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	memalloc_noreclaim_restore(flags);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = to_gt(i915);

	return intel_sseu_status(m, gt);
}

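/*
 * While i915_forcewake_user is held open, user forcewake keeps the
 * hardware awake on every GT; the references are dropped on release.
 */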
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, i915, i)
		intel_gt_pm_debugfs_forcewake_user_open(gt);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, i915, i)
		intel_gt_pm_debugfs_forcewake_user_release(gt);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

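/* Read-only seq_file nodes registered via drm_debugfs_create_files(). */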
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
};

void i915_debugfs_register(struct drm_i915_private *i915)
{
	struct dentry *debugfs_root = i915->drm.debugfs_root;
	int i;

	i915_debugfs_params(i915);

	debugfs_create_file("i915_forcewake_user", S_IRUSR, debugfs_root,
			    i915, &i915_forcewake_fops);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name, S_IRUGO | S_IWUSR,
				    debugfs_root, i915,
				    i915_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(i915_debugfs_list,
				 ARRAY_SIZE(i915_debugfs_list),
				 debugfs_root, i915->drm.primary);

	i915_gpu_error_debugfs_register(i915);
}