1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2020 Intel Corporation
4 */
5
6#include <linux/debugfs.h>
7#include <linux/iopoll.h>
8
9#include <drm/drm_print.h>
10
11#include "g4x_dp.h"
12#include "i915_reg.h"
13#include "i915_utils.h"
14#include "intel_de.h"
15#include "intel_display_power_well.h"
16#include "intel_display_regs.h"
17#include "intel_display_types.h"
18#include "intel_dp.h"
19#include "intel_dpio_phy.h"
20#include "intel_dpll.h"
21#include "intel_lvds.h"
22#include "intel_lvds_regs.h"
23#include "intel_pps.h"
24#include "intel_pps_regs.h"
25#include "intel_quirks.h"
26
27static void vlv_steal_power_sequencer(struct intel_display *display,
28 enum pipe pipe);
29
30static void pps_init_delays(struct intel_dp *intel_dp);
31static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
32
33static const char *pps_name(struct intel_dp *intel_dp)
34{
35 struct intel_display *display = to_intel_display(intel_dp);
36 struct intel_pps *pps = &intel_dp->pps;
37
38 if (display->platform.valleyview || display->platform.cherryview) {
39 switch (pps->vlv_pps_pipe) {
40 case INVALID_PIPE:
41 /*
42 * FIXME would be nice if we can guarantee
43 * to always have a valid PPS when calling this.
44 */
45 return "PPS <none>";
46 case PIPE_A:
47 return "PPS A";
48 case PIPE_B:
49 return "PPS B";
50 default:
51 MISSING_CASE(pps->vlv_pps_pipe);
52 break;
53 }
54 } else {
55 switch (pps->pps_idx) {
56 case 0:
57 return "PPS 0";
58 case 1:
59 return "PPS 1";
60 default:
61 MISSING_CASE(pps->pps_idx);
62 break;
63 }
64 }
65
66 return "PPS <invalid>";
67}
68
69intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
70{
71 struct intel_display *display = to_intel_display(intel_dp);
72 intel_wakeref_t wakeref;
73
 /*
 * See vlv_pps_reset_all() for why we need a power domain reference here.
 */
 wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE);
 mutex_lock(&display->pps.mutex);
79
80 return wakeref;
81}
82
83intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
84 intel_wakeref_t wakeref)
85{
86 struct intel_display *display = to_intel_display(intel_dp);
87
 mutex_unlock(&display->pps.mutex);
 intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, wakeref);
90
91 return NULL;
92}
93
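/*
 * Kick the power sequencer so that it locks onto this port: briefly
 * enable and then disable the DP port with a minimal single-lane
 * configuration. The pipe's DPLL must be running for this to work.
 */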
94static void
95vlv_power_sequencer_kick(struct intel_dp *intel_dp)
96{
97 struct intel_display *display = to_intel_display(intel_dp);
98 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
99 enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
100 bool pll_enabled, release_cl_override = false;
101 enum dpio_phy phy = vlv_pipe_to_phy(pipe);
102 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
103 u32 DP;
104
105 if (drm_WARN(display->drm,
106 intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN,
107 "skipping %s kick due to [ENCODER:%d:%s] being active\n",
108 pps_name(intel_dp),
109 dig_port->base.base.base.id, dig_port->base.base.name))
110 return;
111
112 drm_dbg_kms(display->drm,
113 "kicking %s for [ENCODER:%d:%s]\n",
114 pps_name(intel_dp),
115 dig_port->base.base.base.id, dig_port->base.base.name);
116
117 /* Preserve the BIOS-computed detected bit. This is
118 * supposed to be read-only.
119 */
 DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
121 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
122 DP |= DP_PORT_WIDTH(1);
123 DP |= DP_LINK_TRAIN_PAT_1;
124
125 if (display->platform.cherryview)
126 DP |= DP_PIPE_SEL_CHV(pipe);
127 else
128 DP |= DP_PIPE_SEL(pipe);
129
130 pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
131
 /*
 * The DPLL for the pipe must be enabled for this to work.
 * So enable it temporarily if it's not already enabled.
 */
 if (!pll_enabled) {
 release_cl_override = display->platform.cherryview &&
 !chv_phy_powergate_ch(display, phy, ch, true);
139
 if (vlv_force_pll_on(display, pipe, vlv_get_dpll(display))) {
141 drm_err(display->drm,
142 "Failed to force on PLL for pipe %c!\n",
143 pipe_name(pipe));
144 return;
145 }
146 }
147
148 /*
149 * Similar magic as in intel_dp_enable_port().
150 * We _must_ do this port enable + disable trick
151 * to make this power sequencer lock onto the port.
152 * Otherwise even VDD force bit won't work.
153 */
 intel_de_write(display, intel_dp->output_reg, DP);
 intel_de_posting_read(display, intel_dp->output_reg);

 intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN);
 intel_de_posting_read(display, intel_dp->output_reg);

 intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN);
 intel_de_posting_read(display, intel_dp->output_reg);
162
163 if (!pll_enabled) {
164 vlv_force_pll_off(display, pipe);
165
166 if (release_cl_override)
 chv_phy_powergate_ch(display, phy, ch, false);
168 }
169}
170
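/* Find a power sequencer (pipe A or B) not already claimed by another port. */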
171static enum pipe vlv_find_free_pps(struct intel_display *display)
172{
173 struct intel_encoder *encoder;
174 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
175
 /*
 * We don't have a power sequencer currently.
 * Pick one that's not used by other ports.
 */
180 for_each_intel_dp(display->drm, encoder) {
181 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
182
183 if (encoder->type == INTEL_OUTPUT_EDP) {
184 drm_WARN_ON(display->drm,
185 intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
186 intel_dp->pps.vlv_active_pipe !=
187 intel_dp->pps.vlv_pps_pipe);
188
189 if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
190 pipes &= ~(1 << intel_dp->pps.vlv_pps_pipe);
191 } else {
192 drm_WARN_ON(display->drm,
193 intel_dp->pps.vlv_pps_pipe != INVALID_PIPE);
194
195 if (intel_dp->pps.vlv_active_pipe != INVALID_PIPE)
196 pipes &= ~(1 << intel_dp->pps.vlv_active_pipe);
197 }
198 }
199
200 if (pipes == 0)
201 return INVALID_PIPE;
202
203 return ffs(pipes) - 1;
204}
205
206static enum pipe
207vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
208{
209 struct intel_display *display = to_intel_display(intel_dp);
210 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
211 enum pipe pipe;
212
213 lockdep_assert_held(&display->pps.mutex);
214
215 /* We should never land here with regular DP ports */
216 drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
217
218 drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
219 intel_dp->pps.vlv_active_pipe != intel_dp->pps.vlv_pps_pipe);
220
221 if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
222 return intel_dp->pps.vlv_pps_pipe;
223
224 pipe = vlv_find_free_pps(display);
225
226 /*
227 * Didn't find one. This should not happen since there
228 * are two power sequencers and up to two eDP ports.
229 */
230 if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE))
231 pipe = PIPE_A;
232
233 vlv_steal_power_sequencer(display, pipe);
234 intel_dp->pps.vlv_pps_pipe = pipe;
235
236 drm_dbg_kms(display->drm,
237 "picked %s for [ENCODER:%d:%s]\n",
238 pps_name(intel_dp),
239 dig_port->base.base.base.id, dig_port->base.base.name);
240
241 /* init power sequencer on this pipe and port */
242 pps_init_delays(intel_dp);
 pps_init_registers(intel_dp, true);
244
245 /*
246 * Even vdd force doesn't work until we've made
247 * the power sequencer lock in on the port.
248 */
249 vlv_power_sequencer_kick(intel_dp);
250
251 return intel_dp->pps.vlv_pps_pipe;
252}
253
254static int
255bxt_power_sequencer_idx(struct intel_dp *intel_dp)
256{
257 struct intel_display *display = to_intel_display(intel_dp);
258 int pps_idx = intel_dp->pps.pps_idx;
259
260 lockdep_assert_held(&display->pps.mutex);
261
262 /* We should never land here with regular DP ports */
263 drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
264
265 if (!intel_dp->pps.bxt_pps_reset)
266 return pps_idx;
267
268 intel_dp->pps.bxt_pps_reset = false;
269
270 /*
271 * Only the HW needs to be reprogrammed, the SW state is fixed and
272 * has been setup during connector init.
273 */
 pps_init_registers(intel_dp, false);
275
276 return pps_idx;
277}
278
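/* Predicates used to pick the power sequencer the BIOS left active. */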
279typedef bool (*pps_check)(struct intel_display *display, int pps_idx);
280
281static bool pps_has_pp_on(struct intel_display *display, int pps_idx)
282{
283 return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON;
284}
285
286static bool pps_has_vdd_on(struct intel_display *display, int pps_idx)
287{
288 return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD;
289}
290
291static bool pps_any(struct intel_display *display, int pps_idx)
292{
293 return true;
294}
295
296static enum pipe
297vlv_initial_pps_pipe(struct intel_display *display,
298 enum port port, pps_check check)
299{
300 enum pipe pipe;
301
302 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
303 u32 port_sel = intel_de_read(display,
304 PP_ON_DELAYS(display, pipe)) &
305 PANEL_PORT_SELECT_MASK;
306
307 if (port_sel != PANEL_PORT_SELECT_VLV(port))
308 continue;
309
310 if (!check(display, pipe))
311 continue;
312
313 return pipe;
314 }
315
316 return INVALID_PIPE;
317}
318
319static void
320vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
321{
322 struct intel_display *display = to_intel_display(intel_dp);
323 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
324 enum port port = dig_port->base.port;
325
326 lockdep_assert_held(&display->pps.mutex);
327
328 /* try to find a pipe with this port selected */
329 /* first pick one where the panel is on */
 intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
 pps_has_pp_on);
 /* didn't find one? pick one where vdd is on */
 if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
 intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
 pps_has_vdd_on);
 /* didn't find one? pick one with just the correct port */
 if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
 intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
 pps_any);
340
341 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
342 if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) {
343 drm_dbg_kms(display->drm,
344 "[ENCODER:%d:%s] no initial power sequencer\n",
345 dig_port->base.base.base.id, dig_port->base.base.name);
346 return;
347 }
348
349 drm_dbg_kms(display->drm,
350 "[ENCODER:%d:%s] initial power sequencer: %s\n",
351 dig_port->base.base.base.id, dig_port->base.base.name,
352 pps_name(intel_dp));
353}
354
355static int intel_num_pps(struct intel_display *display)
356{
357 if (display->platform.valleyview || display->platform.cherryview)
358 return 2;
359
360 if (display->platform.geminilake || display->platform.broxton)
361 return 2;
362
363 if (INTEL_PCH_TYPE(display) >= PCH_MTL)
364 return 2;
365
366 if (INTEL_PCH_TYPE(display) >= PCH_DG1)
367 return 1;
368
369 if (INTEL_PCH_TYPE(display) >= PCH_ICP)
370 return 2;
371
372 return 1;
373}
374
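/*
 * On ICP..ADP PCHs the second power sequencer is only usable when the
 * SOUTH_CHICKEN1 ICP_SECOND_PPS_IO_SELECT bit routes the PPS IO to it.
 */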
375static bool intel_pps_is_valid(struct intel_dp *intel_dp)
376{
377 struct intel_display *display = to_intel_display(intel_dp);
378
379 if (intel_dp->pps.pps_idx == 1 &&
380 INTEL_PCH_TYPE(display) >= PCH_ICP &&
381 INTEL_PCH_TYPE(display) <= PCH_ADP)
382 return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
383
384 return true;
385}
386
387static int
388bxt_initial_pps_idx(struct intel_display *display, pps_check check)
389{
390 int pps_idx, pps_num = intel_num_pps(display);
391
392 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
393 if (check(display, pps_idx))
394 return pps_idx;
395 }
396
397 return -1;
398}
399
400static bool
401pps_initial_setup(struct intel_dp *intel_dp)
402{
403 struct intel_display *display = to_intel_display(intel_dp);
404 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
405 struct intel_connector *connector = intel_dp->attached_connector;
406
407 lockdep_assert_held(&display->pps.mutex);
408
409 if (display->platform.valleyview || display->platform.cherryview) {
410 vlv_initial_power_sequencer_setup(intel_dp);
411 return true;
412 }
413
414 /* first ask the VBT */
415 if (intel_num_pps(display) > 1)
416 intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
417 else
418 intel_dp->pps.pps_idx = 0;
419
420 if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display)))
421 intel_dp->pps.pps_idx = -1;
422
423 /* VBT wasn't parsed yet? pick one where the panel is on */
424 if (intel_dp->pps.pps_idx < 0)
 intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on);
 /* didn't find one? pick one where vdd is on */
 if (intel_dp->pps.pps_idx < 0)
 intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on);
 /* didn't find one? pick any */
 if (intel_dp->pps.pps_idx < 0) {
 intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any);
432
433 drm_dbg_kms(display->drm,
434 "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
435 encoder->base.base.id, encoder->base.name,
436 pps_name(intel_dp));
437 } else {
438 drm_dbg_kms(display->drm,
439 "[ENCODER:%d:%s] initial power sequencer: %s\n",
440 encoder->base.base.id, encoder->base.name,
441 pps_name(intel_dp));
442 }
443
444 return intel_pps_is_valid(intel_dp);
445}
446
447void vlv_pps_reset_all(struct intel_display *display)
448{
449 struct intel_encoder *encoder;
450
451 if (!HAS_DISPLAY(display))
452 return;
453
454 /*
455 * We can't grab pps_mutex here due to deadlock with power_domain
456 * mutex when power_domain functions are called while holding pps_mutex.
457 * That also means that in order to use vlv_pps_pipe the code needs to
458 * hold both a power domain reference and pps_mutex, and the power domain
459 * reference get/put must be done while _not_ holding pps_mutex.
460 * pps_{lock,unlock}() do these steps in the correct order, so one
461 * should use them always.
462 */
463
464 for_each_intel_dp(display->drm, encoder) {
465 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
466
467 drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
468
469 if (encoder->type == INTEL_OUTPUT_EDP)
470 intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
471 }
472}
473
474void bxt_pps_reset_all(struct intel_display *display)
475{
476 struct intel_encoder *encoder;
477
478 if (!HAS_DISPLAY(display))
479 return;
480
481 /* See vlv_pps_reset_all() for why we can't grab pps_mutex here. */
482
483 for_each_intel_dp(display->drm, encoder) {
484 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
485
486 if (encoder->type == INTEL_OUTPUT_EDP)
487 intel_dp->pps.bxt_pps_reset = true;
488 }
489}
490
491struct pps_registers {
492 i915_reg_t pp_ctrl;
493 i915_reg_t pp_stat;
494 i915_reg_t pp_on;
495 i915_reg_t pp_off;
496 i915_reg_t pp_div;
497};
498
499static void intel_pps_get_registers(struct intel_dp *intel_dp,
500 struct pps_registers *regs)
501{
502 struct intel_display *display = to_intel_display(intel_dp);
503 int pps_idx;
504
 memset(regs, 0, sizeof(*regs));
506
507 if (display->platform.valleyview || display->platform.cherryview)
508 pps_idx = vlv_power_sequencer_pipe(intel_dp);
509 else if (display->platform.geminilake || display->platform.broxton)
510 pps_idx = bxt_power_sequencer_idx(intel_dp);
511 else
512 pps_idx = intel_dp->pps.pps_idx;
513
514 regs->pp_ctrl = PP_CONTROL(display, pps_idx);
515 regs->pp_stat = PP_STATUS(display, pps_idx);
516 regs->pp_on = PP_ON_DELAYS(display, pps_idx);
517 regs->pp_off = PP_OFF_DELAYS(display, pps_idx);
518
519 /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
520 if (display->platform.geminilake || display->platform.broxton ||
521 INTEL_PCH_TYPE(display) >= PCH_CNP)
522 regs->pp_div = INVALID_MMIO_REG;
523 else
524 regs->pp_div = PP_DIVISOR(display, pps_idx);
525}
526
527static i915_reg_t
528_pp_ctrl_reg(struct intel_dp *intel_dp)
529{
530 struct pps_registers regs;
531
 intel_pps_get_registers(intel_dp, &regs);
533
534 return regs.pp_ctrl;
535}
536
537static i915_reg_t
538_pp_stat_reg(struct intel_dp *intel_dp)
539{
540 struct pps_registers regs;
541
 intel_pps_get_registers(intel_dp, &regs);
543
544 return regs.pp_stat;
545}
546
547static bool edp_have_panel_power(struct intel_dp *intel_dp)
548{
549 struct intel_display *display = to_intel_display(intel_dp);
550
551 lockdep_assert_held(&display->pps.mutex);
552
553 if ((display->platform.valleyview || display->platform.cherryview) &&
554 intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
555 return false;
556
 return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
558}
559
560static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
561{
562 struct intel_display *display = to_intel_display(intel_dp);
563
564 lockdep_assert_held(&display->pps.mutex);
565
566 if ((display->platform.valleyview || display->platform.cherryview) &&
567 intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
568 return false;
569
 return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
571}
572
573void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
574{
575 struct intel_display *display = to_intel_display(intel_dp);
576 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
577
578 if (!intel_dp_is_edp(intel_dp))
579 return;
580
581 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
582 drm_WARN(display->drm, 1,
583 "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
584 dig_port->base.base.base.id, dig_port->base.base.name,
585 pps_name(intel_dp));
586 drm_dbg_kms(display->drm,
587 "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
588 dig_port->base.base.base.id, dig_port->base.base.name,
589 pps_name(intel_dp),
590 intel_de_read(display, _pp_stat_reg(intel_dp)),
591 intel_de_read(display, _pp_ctrl_reg(intel_dp)));
592 }
593}
594
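/*
 * PP_STATUS mask/value pairs used by wait_panel_status() to wait for the
 * power sequencer to settle into the panel on, off, or power cycle idle
 * states.
 */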
595#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
596#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
597
598#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
599#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
600
601#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
602#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
603
604static void intel_pps_verify_state(struct intel_dp *intel_dp);
605
606static void wait_panel_status(struct intel_dp *intel_dp,
607 u32 mask, u32 value)
608{
609 struct intel_display *display = to_intel_display(intel_dp);
610 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
611 i915_reg_t pp_stat_reg, pp_ctrl_reg;
612 int ret;
613 u32 val;
614
615 lockdep_assert_held(&display->pps.mutex);
616
617 intel_pps_verify_state(intel_dp);
618
619 pp_stat_reg = _pp_stat_reg(intel_dp);
620 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
621
622 drm_dbg_kms(display->drm,
623 "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
624 dig_port->base.base.base.id, dig_port->base.base.name,
625 pps_name(intel_dp),
626 mask, value,
627 intel_de_read(display, pp_stat_reg),
628 intel_de_read(display, pp_ctrl_reg));
629
630 ret = poll_timeout_us(val = intel_de_read(display, pp_stat_reg),
631 (val & mask) == value,
632 10 * 1000, 5000 * 1000, true);
633 if (ret) {
634 drm_err(display->drm,
635 "[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
636 dig_port->base.base.base.id, dig_port->base.base.name,
637 pps_name(intel_dp),
638 intel_de_read(display, pp_stat_reg),
639 intel_de_read(display, pp_ctrl_reg));
640 return;
641 }
642
643 drm_dbg_kms(display->drm, "Wait complete\n");
644}
645
646static void wait_panel_on(struct intel_dp *intel_dp)
647{
648 struct intel_display *display = to_intel_display(intel_dp);
649 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
650
651 drm_dbg_kms(display->drm,
652 "[ENCODER:%d:%s] %s wait for panel power on\n",
653 dig_port->base.base.base.id, dig_port->base.base.name,
654 pps_name(intel_dp));
655 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
656}
657
658static void wait_panel_off(struct intel_dp *intel_dp)
659{
660 struct intel_display *display = to_intel_display(intel_dp);
661 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
662
663 drm_dbg_kms(display->drm,
664 "[ENCODER:%d:%s] %s wait for panel power off time\n",
665 dig_port->base.base.base.id, dig_port->base.base.name,
666 pps_name(intel_dp));
667 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
668}
669
670static void wait_panel_power_cycle(struct intel_dp *intel_dp)
671{
672 struct intel_display *display = to_intel_display(intel_dp);
673 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
674 ktime_t panel_power_on_time;
675 s64 panel_power_off_duration, remaining;
676
677 /* take the difference of current time and panel power off time
678 * and then make panel wait for power_cycle if needed. */
679 panel_power_on_time = ktime_get_boottime();
 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
681
682 remaining = max(0, intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
683
684 drm_dbg_kms(display->drm,
685 "[ENCODER:%d:%s] %s wait for panel power cycle (%lld ms remaining)\n",
686 dig_port->base.base.base.id, dig_port->base.base.name,
687 pps_name(intel_dp), remaining);
688
689 /* When we disable the VDD override bit last we have to do the manual
690 * wait. */
691 if (remaining)
 wait_remaining_ms_from_jiffies(jiffies, remaining);
693
694 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
695}
696
697void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
698{
699 intel_wakeref_t wakeref;
700
701 if (!intel_dp_is_edp(intel_dp))
702 return;
703
704 with_intel_pps_lock(intel_dp, wakeref)
705 wait_panel_power_cycle(intel_dp);
706}
707
708static void wait_backlight_on(struct intel_dp *intel_dp)
709{
 wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
 intel_dp->pps.backlight_on_delay);
712}
713
714static void edp_wait_backlight_off(struct intel_dp *intel_dp)
715{
 wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
 intel_dp->pps.backlight_off_delay);
718}
719
720/* Read the current pp_control value, unlocking the register if it
721 * is locked
722 */
723
724static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
725{
726 struct intel_display *display = to_intel_display(intel_dp);
727 u32 control;
728
729 lockdep_assert_held(&display->pps.mutex);
730
 control = intel_de_read(display, _pp_ctrl_reg(intel_dp));
732 if (drm_WARN_ON(display->drm, !HAS_DDI(display) &&
733 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
734 control &= ~PANEL_UNLOCK_MASK;
735 control |= PANEL_UNLOCK_REGS;
736 }
737 return control;
738}
739
740/*
741 * Must be paired with intel_pps_vdd_off_unlocked().
742 * Must hold pps_mutex around the whole on/off sequence.
743 * Can be nested with intel_pps_vdd_{on,off}() calls.
744 */
745bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
746{
747 struct intel_display *display = to_intel_display(intel_dp);
748 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
749 u32 pp;
750 i915_reg_t pp_stat_reg, pp_ctrl_reg;
751 bool need_to_disable = !intel_dp->pps.want_panel_vdd;
752
753 if (!intel_dp_is_edp(intel_dp))
754 return false;
755
756 lockdep_assert_held(&display->pps.mutex);
757
 cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
759 intel_dp->pps.want_panel_vdd = true;
760
761 if (edp_have_panel_vdd(intel_dp))
762 return need_to_disable;
763
764 drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
 intel_dp->pps.vdd_wakeref = intel_display_power_get(display,
 intel_aux_power_domain(dig_port));
767
768 pp_stat_reg = _pp_stat_reg(intel_dp);
769 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
770
771 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
772 dig_port->base.base.base.id, dig_port->base.base.name,
773 pps_name(intel_dp));
774
775 if (!edp_have_panel_power(intel_dp))
776 wait_panel_power_cycle(intel_dp);
777
778 pp = ilk_get_pp_control(intel_dp);
779 pp |= EDP_FORCE_VDD;
780
 intel_de_write(display, pp_ctrl_reg, pp);
 intel_de_posting_read(display, pp_ctrl_reg);
783 drm_dbg_kms(display->drm,
784 "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
785 dig_port->base.base.base.id, dig_port->base.base.name,
786 pps_name(intel_dp),
787 intel_de_read(display, pp_stat_reg),
788 intel_de_read(display, pp_ctrl_reg));
789 /*
790 * If the panel wasn't on, delay before accessing aux channel
791 */
792 if (!edp_have_panel_power(intel_dp)) {
793 drm_dbg_kms(display->drm,
794 "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
795 dig_port->base.base.base.id, dig_port->base.base.name,
796 pps_name(intel_dp));
 msleep(intel_dp->pps.panel_power_up_delay);
798 }
799
800 return need_to_disable;
801}
802
803/*
804 * Must be paired with intel_pps_vdd_off() or - to disable
805 * both VDD and panel power - intel_pps_off().
806 * Nested calls to these functions are not allowed since
807 * we drop the lock. Caller must use some higher level
808 * locking to prevent nested calls from other threads.
809 */
810void intel_pps_vdd_on(struct intel_dp *intel_dp)
811{
812 struct intel_display *display = to_intel_display(intel_dp);
813 intel_wakeref_t wakeref;
814 bool vdd;
815
816 if (!intel_dp_is_edp(intel_dp))
817 return;
818
819 vdd = false;
820 with_intel_pps_lock(intel_dp, wakeref)
821 vdd = intel_pps_vdd_on_unlocked(intel_dp);
822 INTEL_DISPLAY_STATE_WARN(display, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
823 dp_to_dig_port(intel_dp)->base.base.base.id,
824 dp_to_dig_port(intel_dp)->base.base.name,
825 pps_name(intel_dp));
826}
827
828static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
829{
830 struct intel_display *display = to_intel_display(intel_dp);
831 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
832 u32 pp;
833 i915_reg_t pp_stat_reg, pp_ctrl_reg;
834
835 lockdep_assert_held(&display->pps.mutex);
836
837 drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd);
838
839 if (!edp_have_panel_vdd(intel_dp))
840 return;
841
842 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
843 dig_port->base.base.base.id, dig_port->base.base.name,
844 pps_name(intel_dp));
845
846 pp = ilk_get_pp_control(intel_dp);
847 pp &= ~EDP_FORCE_VDD;
848
849 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
850 pp_stat_reg = _pp_stat_reg(intel_dp);
851
 intel_de_write(display, pp_ctrl_reg, pp);
 intel_de_posting_read(display, pp_ctrl_reg);
854
855 /* Make sure sequencer is idle before allowing subsequent activity */
856 drm_dbg_kms(display->drm,
857 "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
858 dig_port->base.base.base.id, dig_port->base.base.name,
859 pps_name(intel_dp),
860 intel_de_read(display, pp_stat_reg),
861 intel_de_read(display, pp_ctrl_reg));
862
863 if ((pp & PANEL_POWER_ON) == 0) {
864 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
865 intel_dp_invalidate_source_oui(intel_dp);
866 }
867
 intel_display_power_put(display,
 intel_aux_power_domain(dig_port),
 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
871}
872
873void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
874{
875 intel_wakeref_t wakeref;
876
877 if (!intel_dp_is_edp(intel_dp))
878 return;
879
 cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
881 /*
882 * vdd might still be enabled due to the delayed vdd off.
883 * Make sure vdd is actually turned off here.
884 */
885 with_intel_pps_lock(intel_dp, wakeref)
886 intel_pps_vdd_off_sync_unlocked(intel_dp);
887}
888
889static void edp_panel_vdd_work(struct work_struct *__work)
890{
891 struct intel_pps *pps = container_of(to_delayed_work(__work),
892 struct intel_pps, panel_vdd_work);
893 struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
894 intel_wakeref_t wakeref;
895
896 with_intel_pps_lock(intel_dp, wakeref) {
897 if (!intel_dp->pps.want_panel_vdd)
898 intel_pps_vdd_off_sync_unlocked(intel_dp);
899 }
900}
901
902static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
903{
904 struct intel_display *display = to_intel_display(intel_dp);
905 unsigned long delay;
906
907 /*
908 * We may not yet know the real power sequencing delays,
909 * so keep VDD enabled until we're done with init.
910 */
911 if (intel_dp->pps.initializing)
912 return;
913
914 /*
915 * Queue the timer to fire a long time from now (relative to the power
916 * down delay) to keep the panel power up across a sequence of
917 * operations.
918 */
 delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
 queue_delayed_work(display->wq.unordered,
 &intel_dp->pps.panel_vdd_work, delay);
922}
923
924/*
925 * Must be paired with edp_panel_vdd_on().
926 * Must hold pps_mutex around the whole on/off sequence.
927 * Can be nested with intel_pps_vdd_{on,off}() calls.
928 */
929void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
930{
931 struct intel_display *display = to_intel_display(intel_dp);
932
933 if (!intel_dp_is_edp(intel_dp))
934 return;
935
936 lockdep_assert_held(&display->pps.mutex);
937
938 INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
939 "[ENCODER:%d:%s] %s VDD not forced on",
940 dp_to_dig_port(intel_dp)->base.base.base.id,
941 dp_to_dig_port(intel_dp)->base.base.name,
942 pps_name(intel_dp));
943
944 intel_dp->pps.want_panel_vdd = false;
945
946 if (sync)
947 intel_pps_vdd_off_sync_unlocked(intel_dp);
948 else
949 edp_panel_vdd_schedule_off(intel_dp);
950}
951
952void intel_pps_vdd_off(struct intel_dp *intel_dp)
953{
954 intel_wakeref_t wakeref;
955
956 if (!intel_dp_is_edp(intel_dp))
957 return;
958
959 with_intel_pps_lock(intel_dp, wakeref)
 intel_pps_vdd_off_unlocked(intel_dp, false);
961}
962
963void intel_pps_on_unlocked(struct intel_dp *intel_dp)
964{
965 struct intel_display *display = to_intel_display(intel_dp);
966 u32 pp;
967 i915_reg_t pp_ctrl_reg;
968
969 lockdep_assert_held(&display->pps.mutex);
970
971 if (!intel_dp_is_edp(intel_dp))
972 return;
973
974 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
975 dp_to_dig_port(intel_dp)->base.base.base.id,
976 dp_to_dig_port(intel_dp)->base.base.name,
977 pps_name(intel_dp));
978
979 if (drm_WARN(display->drm, edp_have_panel_power(intel_dp),
980 "[ENCODER:%d:%s] %s panel power already on\n",
981 dp_to_dig_port(intel_dp)->base.base.base.id,
982 dp_to_dig_port(intel_dp)->base.base.name,
983 pps_name(intel_dp)))
984 return;
985
986 wait_panel_power_cycle(intel_dp);
987
988 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
989 pp = ilk_get_pp_control(intel_dp);
990 if (display->platform.ironlake) {
991 /* ILK workaround: disable reset around power sequence */
992 pp &= ~PANEL_POWER_RESET;
 intel_de_write(display, pp_ctrl_reg, pp);
 intel_de_posting_read(display, pp_ctrl_reg);
995 }
996
997 /*
998 * WA: 22019252566
999 * Disable DPLS gating around power sequence.
1000 */
1001 if (IS_DISPLAY_VER(display, 13, 14))
 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
 0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
1004
1005 pp |= PANEL_POWER_ON;
1006 if (!display->platform.ironlake)
1007 pp |= PANEL_POWER_RESET;
1008
 intel_de_write(display, pp_ctrl_reg, pp);
 intel_de_posting_read(display, pp_ctrl_reg);
1011
1012 wait_panel_on(intel_dp);
1013 intel_dp->pps.last_power_on = jiffies;
1014
1015 if (IS_DISPLAY_VER(display, 13, 14))
 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
 PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);
1018
1019 if (display->platform.ironlake) {
1020 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
 intel_de_write(display, pp_ctrl_reg, pp);
 intel_de_posting_read(display, pp_ctrl_reg);
1023 }
1024}
1025
1026void intel_pps_on(struct intel_dp *intel_dp)
1027{
1028 intel_wakeref_t wakeref;
1029
1030 if (!intel_dp_is_edp(intel_dp))
1031 return;
1032
1033 with_intel_pps_lock(intel_dp, wakeref)
1034 intel_pps_on_unlocked(intel_dp);
1035}
1036
1037void intel_pps_off_unlocked(struct intel_dp *intel_dp)
1038{
1039 struct intel_display *display = to_intel_display(intel_dp);
1040 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1041 u32 pp;
1042 i915_reg_t pp_ctrl_reg;
1043
1044 lockdep_assert_held(&display->pps.mutex);
1045
1046 if (!intel_dp_is_edp(intel_dp))
1047 return;
1048
1049 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
1050 dig_port->base.base.base.id, dig_port->base.base.name,
1051 pps_name(intel_dp));
1052
1053 drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd,
1054 "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
1055 dig_port->base.base.base.id, dig_port->base.base.name,
1056 pps_name(intel_dp));
1057
1058 pp = ilk_get_pp_control(intel_dp);
1059 /* We need to switch off panel power _and_ force vdd, for otherwise some
1060 * panels get very unhappy and cease to work. */
1061 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1062 EDP_BLC_ENABLE);
1063
1064 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1065
1066 intel_dp->pps.want_panel_vdd = false;
1067
 intel_de_write(display, pp_ctrl_reg, pp);
 intel_de_posting_read(display, pp_ctrl_reg);
1070
1071 wait_panel_off(intel_dp);
1072 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1073
1074 intel_dp_invalidate_source_oui(intel_dp);
1075
1076 /* We got a reference when we enabled the VDD. */
 intel_display_power_put(display,
 intel_aux_power_domain(dig_port),
 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
1080}
1081
1082void intel_pps_off(struct intel_dp *intel_dp)
1083{
1084 intel_wakeref_t wakeref;
1085
1086 if (!intel_dp_is_edp(intel_dp))
1087 return;
1088
1089 with_intel_pps_lock(intel_dp, wakeref)
1090 intel_pps_off_unlocked(intel_dp);
1091}
1092
1093/* Enable backlight in the panel power control. */
1094void intel_pps_backlight_on(struct intel_dp *intel_dp)
1095{
1096 struct intel_display *display = to_intel_display(intel_dp);
1097 intel_wakeref_t wakeref;
1098
1099 /*
1100 * If we enable the backlight right away following a panel power
1101 * on, we may see slight flicker as the panel syncs with the eDP
1102 * link. So delay a bit to make sure the image is solid before
1103 * allowing it to appear.
1104 */
1105 wait_backlight_on(intel_dp);
1106
1107 with_intel_pps_lock(intel_dp, wakeref) {
1108 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1109 u32 pp;
1110
1111 pp = ilk_get_pp_control(intel_dp);
1112 pp |= EDP_BLC_ENABLE;
1113
 intel_de_write(display, pp_ctrl_reg, pp);
 intel_de_posting_read(display, pp_ctrl_reg);
1116 }
1117}
1118
1119/* Disable backlight in the panel power control. */
1120void intel_pps_backlight_off(struct intel_dp *intel_dp)
1121{
1122 struct intel_display *display = to_intel_display(intel_dp);
1123 intel_wakeref_t wakeref;
1124
1125 if (!intel_dp_is_edp(intel_dp))
1126 return;
1127
1128 with_intel_pps_lock(intel_dp, wakeref) {
1129 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1130 u32 pp;
1131
1132 pp = ilk_get_pp_control(intel_dp);
1133 pp &= ~EDP_BLC_ENABLE;
1134
 intel_de_write(display, pp_ctrl_reg, pp);
 intel_de_posting_read(display, pp_ctrl_reg);
1137 }
1138
1139 intel_dp->pps.last_backlight_off = jiffies;
1140 edp_wait_backlight_off(intel_dp);
1141}
1142
1143/*
1144 * Hook for controlling the panel power control backlight through the bl_power
1145 * sysfs attribute. Take care to handle multiple calls.
1146 */
1147void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
1148{
1149 struct intel_display *display = to_intel_display(connector);
1150 struct intel_dp *intel_dp = intel_attached_dp(connector);
1151 intel_wakeref_t wakeref;
1152 bool is_enabled;
1153
1154 is_enabled = false;
1155 with_intel_pps_lock(intel_dp, wakeref)
1156 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
1157 if (is_enabled == enable)
1158 return;
1159
1160 drm_dbg_kms(display->drm, "panel power control backlight %s\n",
1161 str_enable_disable(enable));
1162
1163 if (enable)
1164 intel_pps_backlight_on(intel_dp);
1165 else
1166 intel_pps_backlight_off(intel_dp);
1167}
1168
1169static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
1170{
1171 struct intel_display *display = to_intel_display(intel_dp);
1172 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1173 enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
1174 i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);
1175
1176 drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
1177
1178 if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
1179 return;
1180
1181 intel_pps_vdd_off_sync_unlocked(intel_dp);
1182
1183 /*
1184 * VLV seems to get confused when multiple power sequencers
1185 * have the same port selected (even if only one has power/vdd
 * enabled). The failure manifests as vlv_wait_port_ready() failing.
1187 * CHV on the other hand doesn't seem to mind having the same port
1188 * selected in multiple power sequencers, but let's clear the
1189 * port select always when logically disconnecting a power sequencer
1190 * from a port.
1191 */
1192 drm_dbg_kms(display->drm,
1193 "detaching %s from [ENCODER:%d:%s]\n",
1194 pps_name(intel_dp),
1195 dig_port->base.base.base.id, dig_port->base.base.name);
 intel_de_write(display, pp_on_reg, 0);
 intel_de_posting_read(display, pp_on_reg);
1198
1199 intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
1200}
1201
1202static void vlv_steal_power_sequencer(struct intel_display *display,
1203 enum pipe pipe)
1204{
1205 struct intel_encoder *encoder;
1206
1207 lockdep_assert_held(&display->pps.mutex);
1208
1209 for_each_intel_dp(display->drm, encoder) {
1210 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1211
1212 drm_WARN(display->drm, intel_dp->pps.vlv_active_pipe == pipe,
1213 "stealing PPS %c from active [ENCODER:%d:%s]\n",
1214 pipe_name(pipe), encoder->base.base.id,
1215 encoder->base.name);
1216
1217 if (intel_dp->pps.vlv_pps_pipe != pipe)
1218 continue;
1219
1220 drm_dbg_kms(display->drm,
1221 "stealing PPS %c from [ENCODER:%d:%s]\n",
1222 pipe_name(pipe), encoder->base.base.id,
1223 encoder->base.name);
1224
1225 /* make sure vdd is off before we steal it */
1226 vlv_detach_power_sequencer(intel_dp);
1227 }
1228}
1229
1230static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
1231{
1232 struct intel_display *display = to_intel_display(intel_dp);
1233 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1234 enum pipe pipe;
1235
 if (g4x_dp_port_enabled(display, intel_dp->output_reg,
 encoder->port, &pipe))
1238 return pipe;
1239
1240 return INVALID_PIPE;
1241}
1242
1243/* Call on all DP, not just eDP */
1244void vlv_pps_pipe_init(struct intel_dp *intel_dp)
1245{
1246 intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
1247 intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
1248}
1249
1250/* Call on all DP, not just eDP */
1251void vlv_pps_pipe_reset(struct intel_dp *intel_dp)
1252{
1253 intel_wakeref_t wakeref;
1254
1255 with_intel_pps_lock(intel_dp, wakeref)
1256 intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
1257}
1258
1259enum pipe vlv_pps_backlight_initial_pipe(struct intel_dp *intel_dp)
1260{
1261 enum pipe pipe;
1262
1263 /*
1264 * Figure out the current pipe for the initial backlight setup. If the
1265 * current pipe isn't valid, try the PPS pipe, and if that fails just
1266 * assume pipe A.
1267 */
1268 pipe = vlv_active_pipe(intel_dp);
1269
1270 if (pipe != PIPE_A && pipe != PIPE_B)
1271 pipe = intel_dp->pps.vlv_pps_pipe;
1272
1273 if (pipe != PIPE_A && pipe != PIPE_B)
1274 pipe = PIPE_A;
1275
1276 return pipe;
1277}
1278
1279/* Call on all DP, not just eDP */
1280void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder,
1281 const struct intel_crtc_state *crtc_state)
1282{
1283 struct intel_display *display = to_intel_display(encoder);
1284 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1285 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1286
1287 lockdep_assert_held(&display->pps.mutex);
1288
1289 drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
1290
1291 if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE &&
1292 intel_dp->pps.vlv_pps_pipe != crtc->pipe) {
1293 /*
1294 * If another power sequencer was being used on this
1295 * port previously make sure to turn off vdd there while
1296 * we still have control of it.
1297 */
1298 vlv_detach_power_sequencer(intel_dp);
1299 }
1300
1301 /*
1302 * We may be stealing the power
1303 * sequencer from another port.
1304 */
 vlv_steal_power_sequencer(display, crtc->pipe);
1306
1307 intel_dp->pps.vlv_active_pipe = crtc->pipe;
1308
1309 if (!intel_dp_is_edp(intel_dp))
1310 return;
1311
1312 /* now it's all ours */
1313 intel_dp->pps.vlv_pps_pipe = crtc->pipe;
1314
1315 drm_dbg_kms(display->drm,
1316 "initializing %s for [ENCODER:%d:%s]\n",
1317 pps_name(intel_dp),
1318 encoder->base.base.id, encoder->base.name);
1319
1320 /* init power sequencer on this pipe and port */
1321 pps_init_delays(intel_dp);
 pps_init_registers(intel_dp, true);
1323}
1324
1325/* Call on all DP, not just eDP */
1326void vlv_pps_port_disable(struct intel_encoder *encoder,
1327 const struct intel_crtc_state *crtc_state)
1328{
1329 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1330
1331 intel_wakeref_t wakeref;
1332
1333 with_intel_pps_lock(intel_dp, wakeref)
1334 intel_dp->pps.vlv_active_pipe = INVALID_PIPE;
1335}
1336
1337static void pps_vdd_init(struct intel_dp *intel_dp)
1338{
1339 struct intel_display *display = to_intel_display(intel_dp);
1340 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1341
1342 lockdep_assert_held(&display->pps.mutex);
1343
1344 if (!edp_have_panel_vdd(intel_dp))
1345 return;
1346
1347 /*
1348 * The VDD bit needs a power domain reference, so if the bit is
1349 * already enabled when we boot or resume, grab this reference and
1350 * schedule a vdd off, so we don't hold on to the reference
1351 * indefinitely.
1352 */
1353 drm_dbg_kms(display->drm,
1354 "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
1355 dig_port->base.base.base.id, dig_port->base.base.name,
1356 pps_name(intel_dp));
1357 drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
 intel_dp->pps.vdd_wakeref = intel_display_power_get(display,
 intel_aux_power_domain(dig_port));
1360}
1361
1362bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
1363{
1364 intel_wakeref_t wakeref;
1365 bool have_power = false;
1366
1367 with_intel_pps_lock(intel_dp, wakeref) {
1368 have_power = edp_have_panel_power(intel_dp) ||
1369 edp_have_panel_vdd(intel_dp);
1370 }
1371
1372 return have_power;
1373}
1374
1375static void pps_init_timestamps(struct intel_dp *intel_dp)
1376{
1377 /*
1378 * Initialize panel power off time to 0, assuming panel power could have
1379 * been toggled between kernel boot and now only by a previously loaded
1380 * and removed i915, which has already ensured sufficient power off
1381 * delay at module remove.
1382 */
1383 intel_dp->pps.panel_power_off_time = 0;
1384 intel_dp->pps.last_power_on = jiffies;
1385 intel_dp->pps.last_backlight_off = jiffies;
1386}
1387
1388static void
1389intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct intel_pps_delays *seq)
1390{
1391 struct intel_display *display = to_intel_display(intel_dp);
1392 u32 pp_on, pp_off, pp_ctl, power_cycle_delay;
1393 struct pps_registers regs;
1394
 intel_pps_get_registers(intel_dp, &regs);
1396
1397 pp_ctl = ilk_get_pp_control(intel_dp);
1398
1399 /* Ensure PPS is unlocked */
1400 if (!HAS_DDI(display))
 intel_de_write(display, regs.pp_ctrl, pp_ctl);
1402
 pp_on = intel_de_read(display, regs.pp_on);
 pp_off = intel_de_read(display, regs.pp_off);
1405
1406 /* Pull timing values out of registers */
1407 seq->power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1408 seq->backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1409 seq->backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1410 seq->power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1411
1412 if (i915_mmio_reg_valid(regs.pp_div)) {
1413 u32 pp_div;
1414
 pp_div = intel_de_read(display, regs.pp_div);
1416
1417 power_cycle_delay = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div);
1418 } else {
1419 power_cycle_delay = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl);
1420 }
1421
1422 /* hardware wants <delay>+1 in 100ms units */
1423 seq->power_cycle = power_cycle_delay ? (power_cycle_delay - 1) * 1000 : 0;
1424}
1425
1426static void
1427intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
1428 const struct intel_pps_delays *seq)
1429{
1430 struct intel_display *display = to_intel_display(intel_dp);
1431
1432 drm_dbg_kms(display->drm,
1433 "%s power_up %d backlight_on %d backlight_off %d power_down %d power_cycle %d\n",
1434 state_name, seq->power_up, seq->backlight_on,
1435 seq->backlight_off, seq->power_down, seq->power_cycle);
1436}
1437
1438static void
1439intel_pps_verify_state(struct intel_dp *intel_dp)
1440{
1441 struct intel_display *display = to_intel_display(intel_dp);
1442 struct intel_pps_delays hw;
1443 struct intel_pps_delays *sw = &intel_dp->pps.pps_delays;
1444
 intel_pps_readout_hw_state(intel_dp, &hw);
1446
1447 if (hw.power_up != sw->power_up ||
1448 hw.backlight_on != sw->backlight_on ||
1449 hw.backlight_off != sw->backlight_off ||
1450 hw.power_down != sw->power_down ||
1451 hw.power_cycle != sw->power_cycle) {
1452 drm_err(display->drm, "PPS state mismatch\n");
 intel_pps_dump_state(intel_dp, "sw", sw);
 intel_pps_dump_state(intel_dp, "hw", &hw);
1455 }
1456}
1457
1458static bool pps_delays_valid(struct intel_pps_delays *delays)
1459{
1460 return delays->power_up || delays->backlight_on || delays->backlight_off ||
1461 delays->power_down || delays->power_cycle;
1462}
1463
1464static int msecs_to_pps_units(int msecs)
1465{
1466 /* PPS uses 100us units */
1467 return msecs * 10;
1468}
1469
1470static int pps_units_to_msecs(int val)
1471{
1472 /* PPS uses 100us units */
1473 return DIV_ROUND_UP(val, 10);
1474}
1475
1476static void pps_init_delays_bios(struct intel_dp *intel_dp,
1477 struct intel_pps_delays *bios)
1478{
1479 struct intel_display *display = to_intel_display(intel_dp);
1480
1481 lockdep_assert_held(&display->pps.mutex);
1482
 if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
 intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
1485
1486 *bios = intel_dp->pps.bios_pps_delays;
1487
 intel_pps_dump_state(intel_dp, "bios", bios);
1489}
1490
1491static void pps_init_delays_vbt(struct intel_dp *intel_dp,
1492 struct intel_pps_delays *vbt)
1493{
1494 struct intel_display *display = to_intel_display(intel_dp);
1495 struct intel_connector *connector = intel_dp->attached_connector;
1496
1497 *vbt = connector->panel.vbt.edp.pps;
1498
 if (!pps_delays_valid(vbt))
1500 return;
1501
1502 /*
1503 * On Toshiba Satellite P50-C-18C system the VBT T12 delay
1504 * of 500ms appears to be too short. Occasionally the panel
1505 * just fails to power back on. Increasing the delay to 800ms
1506 * seems sufficient to avoid this problem.
1507 */
 if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
1509 vbt->power_cycle = max_t(u16, vbt->power_cycle, msecs_to_pps_units(1300));
1510 drm_dbg_kms(display->drm,
1511 "Increasing T12 panel delay as per the quirk to %d\n",
1512 vbt->power_cycle);
1513 }
1514
 intel_pps_dump_state(intel_dp, "vbt", vbt);
1516}
1517
1518static void pps_init_delays_spec(struct intel_dp *intel_dp,
1519 struct intel_pps_delays *spec)
1520{
1521 struct intel_display *display = to_intel_display(intel_dp);
1522
1523 lockdep_assert_held(&display->pps.mutex);
1524
1525 /* Upper limits from eDP 1.3 spec */
 spec->power_up = msecs_to_pps_units(10 + 200); /* T1+T3 */
 spec->backlight_on = msecs_to_pps_units(50); /* no limit for T8, use T7 instead */
 spec->backlight_off = msecs_to_pps_units(50); /* no limit for T9, make it symmetric with T8 */
 spec->power_down = msecs_to_pps_units(500); /* T10 */
 spec->power_cycle = msecs_to_pps_units(10 + 500); /* T11+T12 */

 intel_pps_dump_state(intel_dp, "spec", spec);
1533}
1534
1535static void pps_init_delays(struct intel_dp *intel_dp)
1536{
1537 struct intel_display *display = to_intel_display(intel_dp);
1538 struct intel_pps_delays cur, vbt, spec,
1539 *final = &intel_dp->pps.pps_delays;
1540
1541 lockdep_assert_held(&display->pps.mutex);
1542
1543 /* already initialized? */
 if (pps_delays_valid(final))
1545 return;
1546
 pps_init_delays_bios(intel_dp, &cur);
 pps_init_delays_vbt(intel_dp, &vbt);
 pps_init_delays_spec(intel_dp, &spec);
1550
1551 /* Use the max of the register settings and vbt. If both are
1552 * unset, fall back to the spec limits. */
1553#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
1554 spec.field : \
1555 max(cur.field, vbt.field))
1556 assign_final(power_up);
1557 assign_final(backlight_on);
1558 assign_final(backlight_off);
1559 assign_final(power_down);
1560 assign_final(power_cycle);
1561#undef assign_final
1562
 intel_dp->pps.panel_power_up_delay = pps_units_to_msecs(final->power_up);
 intel_dp->pps.backlight_on_delay = pps_units_to_msecs(final->backlight_on);
 intel_dp->pps.backlight_off_delay = pps_units_to_msecs(final->backlight_off);
 intel_dp->pps.panel_power_down_delay = pps_units_to_msecs(final->power_down);
 intel_dp->pps.panel_power_cycle_delay = pps_units_to_msecs(final->power_cycle);
1568
1569 drm_dbg_kms(display->drm,
1570 "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1571 intel_dp->pps.panel_power_up_delay,
1572 intel_dp->pps.panel_power_down_delay,
1573 intel_dp->pps.panel_power_cycle_delay);
1574
1575 drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n",
1576 intel_dp->pps.backlight_on_delay,
1577 intel_dp->pps.backlight_off_delay);
1578
1579 /*
1580 * We override the HW backlight delays to 1 because we do manual waits
1581 * on them. For backlight_on, even BSpec recommends doing it. For
1582 * backlight_off, if we don't do this, we'll end up waiting for the
1583 * backlight off delay twice: once when we do the manual sleep, and
1584 * once when we disable the panel and wait for the PP_STATUS bit to
1585 * become zero.
1586 */
1587 final->backlight_on = 1;
1588 final->backlight_off = 1;
1589
1590 /*
1591 * HW has only a 100msec granularity for power_cycle so round it up
1592 * accordingly.
1593 */
1594 final->power_cycle = roundup(final->power_cycle, msecs_to_pps_units(100));
1595}
1596
1597static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1598{
1599 struct intel_display *display = to_intel_display(intel_dp);
1600 u32 pp_on, pp_off, port_sel = 0;
1601 int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
1602 struct pps_registers regs;
1603 enum port port = dp_to_dig_port(intel_dp)->base.port;
1604 const struct intel_pps_delays *seq = &intel_dp->pps.pps_delays;
1605
1606 lockdep_assert_held(&display->pps.mutex);
1607
 intel_pps_get_registers(intel_dp, &regs);
1609
1610 /*
1611 * On some VLV machines the BIOS can leave the VDD
1612 * enabled even on power sequencers which aren't
1613 * hooked up to any port. This would mess up the
1614 * power domain tracking the first time we pick
1615 * one of these power sequencers for use since
1616 * intel_pps_vdd_on_unlocked() would notice that the VDD was
1617 * already on and therefore wouldn't grab the power
1618 * domain reference. Disable VDD first to avoid this.
1619 * This also avoids spuriously turning the VDD on as
1620 * soon as the new power sequencer gets initialized.
1621 */
1622 if (force_disable_vdd) {
1623 u32 pp = ilk_get_pp_control(intel_dp);
1624
1625 drm_WARN(display->drm, pp & PANEL_POWER_ON,
1626 "Panel power already on\n");
1627
1628 if (pp & EDP_FORCE_VDD)
1629 drm_dbg_kms(display->drm,
1630 "VDD already on, disabling first\n");
1631
1632 pp &= ~EDP_FORCE_VDD;
1633
 intel_de_write(display, regs.pp_ctrl, pp);
1635 }
1636
1637 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->power_up) |
1638 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->backlight_on);
1639 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->backlight_off) |
1640 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->power_down);
1641
1642 /* Haswell doesn't have any port selection bits for the panel
1643 * power sequencer any more. */
1644 if (display->platform.valleyview || display->platform.cherryview) {
1645 port_sel = PANEL_PORT_SELECT_VLV(port);
1646 } else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
1647 switch (port) {
1648 case PORT_A:
1649 port_sel = PANEL_PORT_SELECT_DPA;
1650 break;
1651 case PORT_C:
1652 port_sel = PANEL_PORT_SELECT_DPC;
1653 break;
1654 case PORT_D:
1655 port_sel = PANEL_PORT_SELECT_DPD;
1656 break;
1657 default:
1658 MISSING_CASE(port);
1659 break;
1660 }
1661 }
1662
1663 pp_on |= port_sel;
1664
 intel_de_write(display, regs.pp_on, pp_on);
 intel_de_write(display, regs.pp_off, pp_off);
1667
1668 /*
1669 * Compute the divisor for the pp clock, simply match the Bspec formula.
1670 */
1671 if (i915_mmio_reg_valid(regs.pp_div))
 intel_de_write(display, regs.pp_div,
 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK,
 (100 * div) / 2 - 1) |
 REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
 DIV_ROUND_UP(seq->power_cycle, 1000) + 1));
 else
 intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
 REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
 DIV_ROUND_UP(seq->power_cycle, 1000) + 1));
1681
1682 drm_dbg_kms(display->drm,
1683 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
1684 intel_de_read(display, regs.pp_on),
1685 intel_de_read(display, regs.pp_off),
1686 i915_mmio_reg_valid(regs.pp_div) ?
1687 intel_de_read(display, regs.pp_div) :
1688 (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
1689}
1690
1691void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1692{
1693 struct intel_display *display = to_intel_display(intel_dp);
1694 intel_wakeref_t wakeref;
1695
1696 if (!intel_dp_is_edp(intel_dp))
1697 return;
1698
1699 with_intel_pps_lock(intel_dp, wakeref) {
1700 /*
1701 * Reinit the power sequencer also on the resume path, in case
1702 * BIOS did something nasty with it.
1703 */
1704 if (display->platform.valleyview || display->platform.cherryview)
1705 vlv_initial_power_sequencer_setup(intel_dp);
1706
1707 pps_init_delays(intel_dp);
 pps_init_registers(intel_dp, false);
1709 pps_vdd_init(intel_dp);
1710
1711 if (edp_have_panel_vdd(intel_dp))
1712 edp_panel_vdd_schedule_off(intel_dp);
1713 }
1714}
1715
1716bool intel_pps_init(struct intel_dp *intel_dp)
1717{
1718 intel_wakeref_t wakeref;
1719 bool ret;
1720
1721 intel_dp->pps.initializing = true;
1722 INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1723
1724 pps_init_timestamps(intel_dp);
1725
1726 with_intel_pps_lock(intel_dp, wakeref) {
1727 ret = pps_initial_setup(intel_dp);
1728
1729 pps_init_delays(intel_dp);
 pps_init_registers(intel_dp, false);
1731 pps_vdd_init(intel_dp);
1732 }
1733
1734 return ret;
1735}
1736
1737static void pps_init_late(struct intel_dp *intel_dp)
1738{
1739 struct intel_display *display = to_intel_display(intel_dp);
1740 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1741 struct intel_connector *connector = intel_dp->attached_connector;
1742
1743 if (display->platform.valleyview || display->platform.cherryview)
1744 return;
1745
1746 if (intel_num_pps(display) < 2)
1747 return;
1748
1749 drm_WARN(display->drm,
1750 connector->panel.vbt.backlight.controller >= 0 &&
1751 intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
1752 "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
1753 encoder->base.base.id, encoder->base.name,
1754 intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller);
1755
1756 if (connector->panel.vbt.backlight.controller >= 0)
1757 intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
1758}
1759
1760void intel_pps_init_late(struct intel_dp *intel_dp)
1761{
1762 intel_wakeref_t wakeref;
1763
1764 with_intel_pps_lock(intel_dp, wakeref) {
1765 /* Reinit delays after per-panel info has been parsed from VBT */
1766 pps_init_late(intel_dp);
1767
 memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
1769 pps_init_delays(intel_dp);
 pps_init_registers(intel_dp, false);
1771
1772 intel_dp->pps.initializing = false;
1773
1774 if (edp_have_panel_vdd(intel_dp))
1775 edp_panel_vdd_schedule_off(intel_dp);
1776 }
1777}
1778
1779void intel_pps_unlock_regs_wa(struct intel_display *display)
1780{
1781 int pps_num;
1782 int pps_idx;
1783
1784 if (!HAS_DISPLAY(display) || HAS_DDI(display))
1785 return;
1786 /*
1787 * This w/a is needed at least on CPT/PPT, but to be sure apply it
1788 * everywhere where registers can be write protected.
1789 */
1790 pps_num = intel_num_pps(display);
1791
1792 for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
1793 intel_de_rmw(display, PP_CONTROL(display, pps_idx),
1794 PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
1795}
1796
1797void intel_pps_setup(struct intel_display *display)
1798{
1799 if (HAS_PCH_SPLIT(display) || display->platform.geminilake || display->platform.broxton)
1800 display->pps.mmio_base = PCH_PPS_BASE;
1801 else if (display->platform.valleyview || display->platform.cherryview)
1802 display->pps.mmio_base = VLV_PPS_BASE;
1803 else
1804 display->pps.mmio_base = PPS_BASE;
1805}
1806
1807static int intel_pps_show(struct seq_file *m, void *data)
1808{
1809 struct intel_connector *connector = m->private;
1810 struct intel_dp *intel_dp = intel_attached_dp(connector);
1811
1812 if (connector->base.status != connector_status_connected)
1813 return -ENODEV;
1814
 seq_printf(m, "Panel power up delay: %d\n",
 intel_dp->pps.panel_power_up_delay);
 seq_printf(m, "Panel power down delay: %d\n",
 intel_dp->pps.panel_power_down_delay);
 seq_printf(m, "Panel power cycle delay: %d\n",
 intel_dp->pps.panel_power_cycle_delay);
 seq_printf(m, "Backlight on delay: %d\n",
 intel_dp->pps.backlight_on_delay);
 seq_printf(m, "Backlight off delay: %d\n",
 intel_dp->pps.backlight_off_delay);
1825
1826 return 0;
1827}
1828DEFINE_SHOW_ATTRIBUTE(intel_pps);
1829
1830void intel_pps_connector_debugfs_add(struct intel_connector *connector)
1831{
1832 struct dentry *root = connector->base.debugfs_entry;
1833 int connector_type = connector->base.connector_type;
1834
1835 if (connector_type == DRM_MODE_CONNECTOR_eDP)
1836 debugfs_create_file("i915_panel_timings", 0444, root,
1837 connector, &intel_pps_fops);
1838}
1839
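/*
 * Assert that the power sequencer driving @pipe does not have its
 * registers write protected (locked) while panel power is on.
 */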
1840void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
1841{
1842 i915_reg_t pp_reg;
1843 u32 val;
1844 enum pipe panel_pipe = INVALID_PIPE;
1845 bool locked = true;
1846
1847 if (drm_WARN_ON(display->drm, HAS_DDI(display)))
1848 return;
1849
1850 if (HAS_PCH_SPLIT(display)) {
1851 u32 port_sel;
1852
1853 pp_reg = PP_CONTROL(display, 0);
1854 port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
1855 PANEL_PORT_SELECT_MASK;
1856
1857 switch (port_sel) {
1858 case PANEL_PORT_SELECT_LVDS:
 intel_lvds_port_enabled(display, PCH_LVDS, &panel_pipe);
1860 break;
1861 case PANEL_PORT_SELECT_DPA:
 g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe);
1863 break;
1864 case PANEL_PORT_SELECT_DPC:
 g4x_dp_port_enabled(display, PCH_DP_C, PORT_C, &panel_pipe);
1866 break;
1867 case PANEL_PORT_SELECT_DPD:
 g4x_dp_port_enabled(display, PCH_DP_D, PORT_D, &panel_pipe);
1869 break;
1870 default:
1871 MISSING_CASE(port_sel);
1872 break;
1873 }
1874 } else if (display->platform.valleyview || display->platform.cherryview) {
1875 /* presumably write lock depends on pipe, not port select */
1876 pp_reg = PP_CONTROL(display, pipe);
1877 panel_pipe = pipe;
1878 } else {
1879 u32 port_sel;
1880
1881 pp_reg = PP_CONTROL(display, 0);
1882 port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
1883 PANEL_PORT_SELECT_MASK;
1884
1885 drm_WARN_ON(display->drm,
1886 port_sel != PANEL_PORT_SELECT_LVDS);
 intel_lvds_port_enabled(display, LVDS, &panel_pipe);
1888 }
1889
 val = intel_de_read(display, pp_reg);
1891 if (!(val & PANEL_POWER_ON) ||
1892 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1893 locked = false;
1894
1895 INTEL_DISPLAY_STATE_WARN(display, panel_pipe == pipe && locked,
1896 "panel assertion failure, pipe %c regs locked\n",
1897 pipe_name(pipe));
1898}
1899