/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/intel_display_core.h"
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr_regs.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */
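
/*
 * A minimal sketch of the intended call flow, using only functions defined
 * in this file (error handling omitted; the exact call sites live in the
 * driver load/unload and runtime PM paths):
 *
 *	intel_irq_init(i915);		// work items, no hardware access yet
 *	err = intel_irq_install(i915);	// reset, request_irq(), postinstall
 *	...
 *	intel_irq_suspend(i915);	// runtime suspend: reset + synchronize
 *	intel_irq_resume(i915);		// runtime resume: reset + postinstall
 *	...
 *	intel_irq_uninstall(i915);	// reset, free_irq(), cancel hotplug work
 */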

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
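
/*
 * Note on pmu_irq_stats() above: pmu.irq_count is a single-writer counter,
 * hence no lock is taken; the read side (the PMU code in i915_pmu.c) is
 * expected to pair the WRITE_ONCE() with a READ_ONCE() when sampling it.
 */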

void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
	intel_uncore_write(uncore, regs.imr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.imr);

	intel_uncore_write(uncore, regs.ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
	intel_uncore_write(uncore, regs.iir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.iir);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore, regs.iir);

	intel_uncore_write(uncore, regs.ier, ier_val);
	intel_uncore_write(uncore, regs.imr, imr_val);
	intel_uncore_posting_read(uncore, regs.imr);
}

void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
{
	intel_uncore_write(uncore, regs.emr, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.emr);

	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
}

void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
		     u32 emr_val)
{
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);
	intel_uncore_write(uncore, regs.eir, 0xffffffff);
	intel_uncore_posting_read(uncore, regs.eir);

	intel_uncore_write(uncore, regs.emr, emr_val);
	intel_uncore_posting_read(uncore, regs.emr);
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since it is
 * statistically likely that the same row will go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1 << slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg,
				   GEN7_PARITY_ERROR_VALID |
				   GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);
}
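
/*
 * For illustration (values are examples only), a single slice error handled
 * above results in a KOBJ_CHANGE uevent whose environment takes the form:
 *
 *	<I915_L3_PARITY_UEVENT>=1 ROW=5 BANK=1 SUBBANK=0 SLICE=0
 *
 * which userspace (e.g. a udev rule or daemon) can match on to kick off the
 * row remapping suggested in the kernel-doc above.
 */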

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(display, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 eir = 0, dpinvgtt = 0;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_ack(display, &eir, &dpinvgtt);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(display);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			vlv_display_error_irq_handler(display, eir, dpinvgtt);

		valleyview_pipestat_irq_handler(display, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_display *display = i915->display;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(display)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(display) >= 7)
			ivb_display_irq_handler(display, de_iir);
		else
			ilk_display_irq_handler(display, de_iir);
		ret = IRQ_HANDLED;
	}

	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

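	/* Step 5: re-enable master interrupts, then restore south interrupts */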
	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(display, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_display *display = i915->display;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(display);

	gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(display, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}

static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	struct intel_display *display = i915->display;
	struct intel_gt *gt = to_gt(i915);
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 master_tile_ctl, master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_tile_ctl = dg1_master_intr_disable(regs);
	if (!master_tile_ctl) {
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* FIXME: we only support tile 0 for now. */
	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
	} else {
		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
			master_tile_ctl);
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	gen11_gt_irq_handler(gt, master_ctl);

	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(display);

	gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);

	dg1_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(display, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen2_irq_reset(uncore, DE_IRQ_REGS);
	dev_priv->irq_mask = ~0u;

	if (GRAPHICS_VER(dev_priv) == 7)
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(to_gt(dev_priv));

	ibx_display_irq_reset(display);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	vlv_display_irq_reset(display);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen8_master_intr_disable(intel_uncore_regs(uncore));

	gen8_gt_irq_reset(to_gt(dev_priv));
	gen8_display_irq_reset(display);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(display);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}

static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_gt *gt;
	unsigned int i;

	dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_reset(gt);

	gen11_display_irq_reset(display);

	gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(to_gt(dev_priv));

	gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);

	vlv_display_irq_reset(display);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	ilk_de_irq_postinstall(display);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	vlv_display_irq_postinstall(display);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(display);

	gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(display);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_postinstall(gt);

	gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);

	dg1_de_irq_postinstall(display);

	dg1_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;

	gen8_gt_irq_postinstall(to_gt(dev_priv));

	vlv_display_irq_postinstall(display);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
	struct intel_display *display = i915->display;

	/*
	 * On gen2/3 FBC generates (seemingly spurious)
	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
	 *
	 * Also gen3 bspec has this to say:
	 * "DISPA_INVALID_GTT_PTE
	 *  [DevNapa] : Reserved. This bit does not reflect the page
	 *  table error for the display plane A."
	 *
	 * Unfortunately we can't mask off individual PGTBL_ER bits,
	 * so we just have to mask off all page table errors via EMR.
	 */
	if (HAS_FBC(display))
		return I915_ERROR_MEMORY_REFRESH;
	else
		return I915_ERROR_PAGE_TABLE |
			I915_ERROR_MEMORY_REFRESH;
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_display_irq_reset(display);

	gen2_error_reset(uncore, GEN2_ERROR_REGS);
	gen2_irq_reset(uncore, GEN2_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv));

	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (DISPLAY_VER(display) >= 3) {
		dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
		enable_mask |= I915_ASLE_INTERRUPT;
	}

	if (HAS_HOTPLUG(display)) {
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
	}

	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

	i915_display_irq_postinstall(display);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (HAS_HOTPLUG(display) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		i915_pipestat_irq_handler(display, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_display_irq_reset(display);

	gen2_error_reset(uncore, GEN2_ERROR_REGS);
	gen2_irq_reset(uncore, GEN2_IRQ_REGS);
	dev_priv->irq_mask = ~0u;
}

static u32 i965_error_mask(struct drm_i915_private *i915)
{
	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 *
	 * i965 FBC no longer generates spurious GTT errors,
	 * so we can always enable the page table errors.
	 */
	if (IS_G4X(i915))
		return GM45_ERROR_PAGE_TABLE |
			GM45_ERROR_MEM_PRIV |
			GM45_ERROR_CP_PRIV |
			I915_ERROR_MEMORY_REFRESH;
	else
		return I915_ERROR_PAGE_TABLE |
			I915_ERROR_MEMORY_REFRESH;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));

	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);

	i965_display_irq_postinstall(display);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_display *display = dev_priv->display;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(display);

		/* Call regardless, as some status bits might not be
		 * signalled in IIR */
		i9xx_pipestat_irq_ack(display, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(display, hotplug_status);

		i965_pipestat_irq_handler(display, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irq bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
		return dg1_irq_handler;
	else if (GRAPHICS_VER(dev_priv) >= 11)
		return gen11_irq_handler;
	else if (IS_CHERRYVIEW(dev_priv))
		return cherryview_irq_handler;
	else if (GRAPHICS_VER(dev_priv) >= 8)
		return gen8_irq_handler;
	else if (IS_VALLEYVIEW(dev_priv))
		return valleyview_irq_handler;
	else if (GRAPHICS_VER(dev_priv) >= 5)
		return ilk_irq_handler;
	else if (GRAPHICS_VER(dev_priv) == 4)
		return i965_irq_handler;
	else
		return i915_irq_handler;
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
		dg1_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 11)
		gen11_irq_reset(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 8)
		gen8_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 5)
		ilk_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_reset(dev_priv);
	else
		i915_irq_reset(dev_priv);
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
		dg1_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 11)
		gen11_irq_postinstall(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 8)
		gen8_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) >= 5)
		ilk_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_postinstall(dev_priv);
	else
		i915_irq_postinstall(dev_priv);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->irqs_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irqs_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	struct intel_display *display = dev_priv->display;
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
		return;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(display);
	dev_priv->irqs_enabled = false;
}

/**
 * intel_irq_suspend - Suspend interrupts
 * @i915: i915 device instance
 *
 * This function is used to disable interrupts at runtime.
 */
void intel_irq_suspend(struct drm_i915_private *i915)
{
	intel_irq_reset(i915);
	i915->irqs_enabled = false;
	intel_synchronize_irq(i915);
}

/**
 * intel_irq_resume - Resume interrupts
 * @i915: i915 device instance
 *
 * This function is used to enable interrupts at runtime.
 */
void intel_irq_resume(struct drm_i915_private *i915)
{
	i915->irqs_enabled = true;
	intel_irq_reset(i915);
	intel_irq_postinstall(i915);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}
1309