// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitops.h>

#include <drm/drm_print.h>

#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"

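/*
 * The PM Demand ("pmdemand") mechanism lets the display driver tell the
 * PUnit ahead of time how many display resources (bandwidth, clocks, PHYs,
 * pipes, etc.) an upcoming configuration will need. The fields below map
 * onto the XELPDP_INITIATE_PMDEMAND_REQUEST(0)/(1) register fields
 * programmed in this file.
 */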
struct pmdemand_params {
        u16 qclk_gv_bw;
        u8 voltage_index;
        u8 qclk_gv_index;
        u8 active_pipes;
        u8 active_dbufs;        /* pre-Xe3 only */
        /* Total number of active non Type C PHYs, from active_combo_phys_mask */
        u8 active_phys;
        u8 plls;
        u16 cdclk_freq_mhz;
        /* max from ddi_clocks[] */
        u16 ddiclk_max;
        u8 scalers;             /* pre-Xe3 only */
};

struct intel_pmdemand_state {
        struct intel_global_state base;

        /* Maintain a persistent list of port clocks across all CRTCs */
        int ddi_clocks[I915_MAX_PIPES];

        /* Maintain a persistent mask of the active non Type C (combo) PHYs */
        u16 active_combo_phys_mask;

        /* Parameters to be configured in the pmdemand registers */
        struct pmdemand_params params;
};

struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state)
{
        return container_of(obj_state, struct intel_pmdemand_state, base);
}

static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
        struct intel_pmdemand_state *pmdemand_state;

        pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
        if (!pmdemand_state)
                return NULL;

        return &pmdemand_state->base;
}

static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
                                         struct intel_global_state *state)
{
        kfree(state);
}

static const struct intel_global_state_funcs intel_pmdemand_funcs = {
        .atomic_duplicate_state = intel_pmdemand_duplicate_state,
        .atomic_destroy_state = intel_pmdemand_destroy_state,
};

static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
        struct intel_display *display = to_intel_display(state);
        struct intel_global_state *pmdemand_state =
                intel_atomic_get_global_obj_state(state,
                                                  &display->pmdemand.obj);

        if (IS_ERR(pmdemand_state))
                return ERR_CAST(pmdemand_state);

        return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
        struct intel_display *display = to_intel_display(state);
        struct intel_global_state *pmdemand_state =
                intel_atomic_get_old_global_obj_state(state,
                                                      &display->pmdemand.obj);

        if (!pmdemand_state)
                return NULL;

        return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
        struct intel_display *display = to_intel_display(state);
        struct intel_global_state *pmdemand_state =
                intel_atomic_get_new_global_obj_state(state,
                                                      &display->pmdemand.obj);

        if (!pmdemand_state)
                return NULL;

        return to_intel_pmdemand_state(pmdemand_state);
}

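/*
 * Allocate the initial pmdemand global state and register it with the
 * atomic framework. Wa_14016740474 additionally disables the DMD response
 * timeout on early display version 14 steppings.
 */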
int intel_pmdemand_init(struct intel_display *display)
{
        struct intel_pmdemand_state *pmdemand_state;

        pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
        if (!pmdemand_state)
                return -ENOMEM;

        intel_atomic_global_obj_init(display, &display->pmdemand.obj,
                                     &pmdemand_state->base,
                                     &intel_pmdemand_funcs);

        if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
                /* Wa_14016740474 */
                intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

        return 0;
}

void intel_pmdemand_init_early(struct intel_display *display)
{
        mutex_init(&display->pmdemand.lock);
        init_waitqueue_head(&display->pmdemand.waitqueue);
}

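/*
 * Track which PHYs are active for pmdemand accounting. Type-C PHYs are
 * deliberately excluded: only combo PHYs count towards the PHY field of
 * the pmdemand request.
 */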
void
intel_pmdemand_update_phys_mask(struct intel_display *display,
                                struct intel_encoder *encoder,
                                struct intel_pmdemand_state *pmdemand_state,
                                bool set_bit)
{
        enum phy phy;

        if (DISPLAY_VER(display) < 14)
                return;

        if (!encoder)
                return;

        if (intel_encoder_is_tc(encoder))
                return;

        phy = intel_encoder_to_phy(encoder);

        if (set_bit)
                pmdemand_state->active_combo_phys_mask |= BIT(phy);
        else
                pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
}

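/* Record the DDI port clock (in kHz, as in intel_crtc_state::port_clock) for a pipe */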
void
intel_pmdemand_update_port_clock(struct intel_display *display,
                                 struct intel_pmdemand_state *pmdemand_state,
                                 enum pipe pipe, int port_clock)
{
        if (DISPLAY_VER(display) < 14)
                return;

        pmdemand_state->ddi_clocks[pipe] = port_clock;
}

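/*
 * Refresh the per-pipe port clocks from the new CRTC states, then report
 * the maximum across all pipes, converted from kHz to MHz (rounded up).
 */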
static void
intel_pmdemand_update_max_ddiclk(struct intel_display *display,
                                 struct intel_atomic_state *state,
                                 struct intel_pmdemand_state *pmdemand_state)
{
        int max_ddiclk = 0;
        const struct intel_crtc_state *new_crtc_state;
        struct intel_crtc *crtc;
        int i;

        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                intel_pmdemand_update_port_clock(display, pmdemand_state,
                                                 crtc->pipe,
                                                 new_crtc_state->port_clock);

        for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
                max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);

        pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
}

static void
intel_pmdemand_update_connector_phys(struct intel_display *display,
                                     struct intel_atomic_state *state,
                                     struct drm_connector_state *conn_state,
                                     bool set_bit,
                                     struct intel_pmdemand_state *pmdemand_state)
{
        struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
        struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
        struct intel_crtc_state *crtc_state;

        if (!crtc)
                return;

        if (set_bit)
                crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
        else
                crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

        if (!crtc_state->hw.active)
                return;

        intel_pmdemand_update_phys_mask(display, encoder, pmdemand_state,
                                        set_bit);
}

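/*
 * For every connector undergoing a modeset, drop the PHY of the old
 * connector state from the mask and add the PHY of the new one, then
 * derive the PHY count, clamped to 7, the largest value the register
 * field can hold.
 */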
static void
intel_pmdemand_update_active_non_tc_phys(struct intel_display *display,
                                         struct intel_atomic_state *state,
                                         struct intel_pmdemand_state *pmdemand_state)
{
        struct drm_connector_state *old_conn_state;
        struct drm_connector_state *new_conn_state;
        struct drm_connector *connector;
        int i;

        for_each_oldnew_connector_in_state(&state->base, connector,
                                           old_conn_state, new_conn_state, i) {
                if (!intel_connector_needs_modeset(state, connector))
                        continue;

                /* First clear the active PHYs from the old connector state */
                intel_pmdemand_update_connector_phys(display, state,
                                                     old_conn_state, false,
                                                     pmdemand_state);

                /* Then set the active PHYs from the new connector state */
                intel_pmdemand_update_connector_phys(display, state,
                                                     new_conn_state, true,
                                                     pmdemand_state);
        }

        pmdemand_state->params.active_phys =
                min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
                      7);
}

static bool
intel_pmdemand_encoder_has_tc_phy(struct intel_display *display,
                                  struct intel_encoder *encoder)
{
        return encoder && intel_encoder_is_tc(encoder);
}

static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
        struct intel_display *display = to_intel_display(state);
        struct drm_connector_state *old_conn_state;
        struct drm_connector_state *new_conn_state;
        struct drm_connector *connector;
        int i;

        for_each_oldnew_connector_in_state(&state->base, connector,
                                           old_conn_state, new_conn_state, i) {
                struct intel_encoder *old_encoder =
                        to_intel_encoder(old_conn_state->best_encoder);
                struct intel_encoder *new_encoder =
                        to_intel_encoder(new_conn_state->best_encoder);

                if (!intel_connector_needs_modeset(state, connector))
                        continue;

                if (old_encoder == new_encoder ||
                    (intel_pmdemand_encoder_has_tc_phy(display, old_encoder) &&
                     intel_pmdemand_encoder_has_tc_phy(display, new_encoder)))
                        continue;

                return true;
        }

        return false;
}

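/*
 * A new pmdemand request is needed whenever anything it reports on could
 * change: QGV bandwidth, dbuf/cdclk configuration, a port clock, or the
 * set of active non Type-C PHYs.
 */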
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
        const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        int i;

        if (intel_bw_pmdemand_needs_update(state))
                return true;

        if (intel_dbuf_pmdemand_needs_update(state))
                return true;

        if (intel_cdclk_pmdemand_needs_update(state))
                return true;

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i)
                if (new_crtc_state->port_clock != old_crtc_state->port_clock)
                        return true;

        return intel_pmdemand_connector_needs_update(state);
}

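/*
 * Compute the new pmdemand parameters from the bw, dbuf and cdclk global
 * states and from the CRTC/connector states in @state. The resulting
 * global state is serialized when a full modeset is allowed; otherwise it
 * is only locked, meaning parallel commits may race and the current
 * register values have to be taken into account when programming.
 */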
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
        struct intel_display *display = to_intel_display(state);
        const struct intel_bw_state *new_bw_state;
        const struct intel_cdclk_state *new_cdclk_state;
        const struct intel_dbuf_state *new_dbuf_state;
        struct intel_pmdemand_state *new_pmdemand_state;

        if (DISPLAY_VER(display) < 14)
                return 0;

        if (!intel_pmdemand_needs_update(state))
                return 0;

        new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
        if (IS_ERR(new_pmdemand_state))
                return PTR_ERR(new_pmdemand_state);

        new_bw_state = intel_atomic_get_bw_state(state);
        if (IS_ERR(new_bw_state))
                return PTR_ERR(new_bw_state);

        /* firmware calculates the qclk_gv_index itself, so request index 0 */
        new_pmdemand_state->params.qclk_gv_index = 0;
        new_pmdemand_state->params.qclk_gv_bw = intel_bw_qgv_point_peakbw(new_bw_state);

        new_dbuf_state = intel_atomic_get_dbuf_state(state);
        if (IS_ERR(new_dbuf_state))
                return PTR_ERR(new_dbuf_state);

        if (DISPLAY_VER(display) < 30) {
                new_pmdemand_state->params.active_dbufs =
                        min_t(u8, intel_dbuf_num_enabled_slices(new_dbuf_state), 3);
                new_pmdemand_state->params.active_pipes =
                        min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), 3);
        } else {
                new_pmdemand_state->params.active_pipes =
                        min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), INTEL_NUM_PIPES(display));
        }

        new_cdclk_state = intel_atomic_get_cdclk_state(state);
        if (IS_ERR(new_cdclk_state))
                return PTR_ERR(new_cdclk_state);

        new_pmdemand_state->params.voltage_index =
                intel_cdclk_actual_voltage_level(new_cdclk_state);
        new_pmdemand_state->params.cdclk_freq_mhz =
                DIV_ROUND_UP(intel_cdclk_actual(new_cdclk_state), 1000);

        intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);

        intel_pmdemand_update_active_non_tc_phys(display, state, new_pmdemand_state);

        /*
         * The active PLL count starts at 1 to account for the CDCLK PLL.
         * TODO: also account for the genlock filter once it is in use.
         */
        new_pmdemand_state->params.plls =
                min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

        /*
         * Set scalers to the maximum: the actual count cannot be computed
         * during flips and fastsets without taking the global state locks.
         */
        new_pmdemand_state->params.scalers = 7;

        if (state->base.allow_modeset)
                return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
        else
                return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}

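/*
 * A new request must not be issued while a previous one is still pending.
 * Returns true once both the request-enable bit and the in-flight status
 * bit have cleared (each waited on for up to 10 ms).
 */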
static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
        return !(intel_de_wait_for_clear(display,
                                         XELPDP_INITIATE_PMDEMAND_REQUEST(1),
                                         XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
                 intel_de_wait_for_clear(display,
                                         GEN12_DCPR_STATUS_1,
                                         XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}

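/*
 * Seed the software pmdemand parameters from whatever is currently
 * programmed in the hardware (e.g. during driver load or resume), so that
 * subsequent updates are computed against the real register contents.
 */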
void
intel_pmdemand_init_pmdemand_params(struct intel_display *display,
                                    struct intel_pmdemand_state *pmdemand_state)
{
        u32 reg1, reg2;

        if (DISPLAY_VER(display) < 14)
                return;

        mutex_lock(&display->pmdemand.lock);
        if (drm_WARN_ON(display->drm,
                        !intel_pmdemand_check_prev_transaction(display))) {
                memset(&pmdemand_state->params, 0,
                       sizeof(pmdemand_state->params));
                goto unlock;
        }

        reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

        reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

        pmdemand_state->params.qclk_gv_bw =
                REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
        pmdemand_state->params.voltage_index =
                REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
        pmdemand_state->params.qclk_gv_index =
                REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
        pmdemand_state->params.active_phys =
                REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

        pmdemand_state->params.cdclk_freq_mhz =
                REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
        pmdemand_state->params.ddiclk_max =
                REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);

        if (DISPLAY_VER(display) >= 30) {
                pmdemand_state->params.active_pipes =
                        REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
        } else {
                pmdemand_state->params.active_pipes =
                        REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
                pmdemand_state->params.active_dbufs =
                        REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);

                pmdemand_state->params.scalers =
                        REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
        }

unlock:
        mutex_unlock(&display->pmdemand.lock);
}

static bool intel_pmdemand_req_complete(struct intel_display *display)
{
        return !(intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
                 XELPDP_PMDEMAND_REQ_ENABLE);
}

static void intel_pmdemand_poll(struct intel_display *display)
{
        const unsigned int timeout_ms = 10;
        u32 status;
        int ret;

        ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
                                   XELPDP_PMDEMAND_REQ_ENABLE, 0,
                                   50, timeout_ms, &status);

        if (ret == -ETIMEDOUT)
                drm_err(display->drm,
                        "timed out waiting for Punit PM Demand Response within %ums (status 0x%08x)\n",
                        timeout_ms, status);
}

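/*
 * Wait for the request-enable bit to clear, which signals that the PUnit
 * has consumed the request. Normally this waits on the pmdemand waitqueue;
 * on LNL (display version 20), Wa_14024400148 requires polling the
 * register instead.
 */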
static void intel_pmdemand_wait(struct intel_display *display)
{
        /* Wa_14024400148: on LNL, use the polling method */
        if (DISPLAY_VER(display) == 20) {
                intel_pmdemand_poll(display);
        } else {
                if (!wait_event_timeout(display->pmdemand.waitqueue,
                                        intel_pmdemand_req_complete(display),
                                        msecs_to_jiffies_timeout(10)))
                        drm_err(display->drm,
                                "timed out waiting for Punit PM Demand Response\n");
        }
}

/* Required to be programmed during Display Init Sequences. */
void intel_pmdemand_program_dbuf(struct intel_display *display,
                                 u8 dbuf_slices)
{
        u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

        /* PM Demand only tracks active dbufs on pre-Xe3 platforms */
        if (DISPLAY_VER(display) >= 30)
                return;

        mutex_lock(&display->pmdemand.lock);
        if (drm_WARN_ON(display->drm,
                        !intel_pmdemand_check_prev_transaction(display)))
                goto unlock;

        intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
                     XELPDP_PMDEMAND_DBUFS_MASK,
                     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
        intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
                     XELPDP_PMDEMAND_REQ_ENABLE);

        intel_pmdemand_wait(display);

unlock:
        mutex_unlock(&display->pmdemand.lock);
}

static void
intel_pmdemand_update_params(struct intel_display *display,
                             const struct intel_pmdemand_state *new,
                             const struct intel_pmdemand_state *old,
                             u32 *reg1, u32 *reg2, bool serialized)
{
        /*
         * The pmdemand parameter updates happen in two steps: a pre-plane
         * and a post-plane update. During the pre-plane update the DE may
         * still be handling old operations, so to avoid unexpected
         * performance issues, program the pmdemand parameters with the
         * higher of the old and new values. Once things have settled, the
         * post-plane update programs the new parameter values.
         *
         * If the pmdemand params are updated without allow_modeset, the
         * updates cannot be serialized, which implies that parallel atomic
         * commits may affect the pmdemand parameters. In that case the
         * current register values need to be considered as well: the
         * pre-plane update must program the max of the old, new and current
         * values, and the post-plane update the max of the new and current
         * values.
         */
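
        /*
         * Illustration with made-up cdclk_freq_mhz values, serialized case
         * (current register contents ignored): old == 480, new == 307.
         * The pre-plane update programs max(480, 307) == 480 so the DE
         * retains enough headroom during the transition; the post-plane
         * update then runs with old == NULL and programs the final 307.
         */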

#define update_reg(reg, field, mask) do { \
        u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
        u32 old_val = old ? old->params.field : 0; \
        u32 new_val = new->params.field; \
\
        *(reg) &= ~(mask); \
        *(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)

        /* Set 1 */
        update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
        update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
        update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
        update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

        /* Set 2 */
        update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
        update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
        update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

        if (DISPLAY_VER(display) >= 30) {
                update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
        } else {
                update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
                update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);

                update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
        }

#undef update_reg
}

static void
intel_pmdemand_program_params(struct intel_display *display,
                              const struct intel_pmdemand_state *new,
                              const struct intel_pmdemand_state *old,
                              bool serialized)
{
        bool changed = false;
        u32 reg1, mod_reg1;
        u32 reg2, mod_reg2;

        mutex_lock(&display->pmdemand.lock);
        if (drm_WARN_ON(display->drm,
                        !intel_pmdemand_check_prev_transaction(display)))
                goto unlock;

        reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
        mod_reg1 = reg1;

        reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
        mod_reg2 = reg2;

        intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
                                     serialized);

        if (reg1 != mod_reg1) {
                intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
                               mod_reg1);
                changed = true;
        }

        if (reg2 != mod_reg2) {
                intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
                               mod_reg2);
                changed = true;
        }

        /* Initiate the PM Demand request only if the register values changed */
        if (!changed)
                goto unlock;

        drm_dbg_kms(display->drm,
                    "initiate pmdemand request values: (0x%x 0x%x)\n",
                    mod_reg1, mod_reg2);

        intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
                     XELPDP_PMDEMAND_REQ_ENABLE);

        intel_pmdemand_wait(display);

unlock:
        mutex_unlock(&display->pmdemand.lock);
}

static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
                             const struct intel_pmdemand_state *old)
{
        return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
}

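/*
 * Pre-plane update: program the max of the old and new parameters (and of
 * the current register values if the global state could not be
 * serialized), so the DE has sufficient resources while the update is in
 * progress.
 */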
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
        struct intel_display *display = to_intel_display(state);
        const struct intel_pmdemand_state *new_pmdemand_state =
                intel_atomic_get_new_pmdemand_state(state);
        const struct intel_pmdemand_state *old_pmdemand_state =
                intel_atomic_get_old_pmdemand_state(state);

        if (DISPLAY_VER(display) < 14)
                return;

        if (!new_pmdemand_state ||
            !intel_pmdemand_state_changed(new_pmdemand_state,
                                          old_pmdemand_state))
                return;

        WARN_ON(!new_pmdemand_state->base.changed);

        intel_pmdemand_program_params(display, new_pmdemand_state,
                                      old_pmdemand_state,
                                      intel_atomic_global_state_is_serialized(state));
}

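/*
 * Post-plane update: the update has settled, so drop the old values by
 * passing old == NULL and program the final parameters.
 */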
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
        struct intel_display *display = to_intel_display(state);
        const struct intel_pmdemand_state *new_pmdemand_state =
                intel_atomic_get_new_pmdemand_state(state);
        const struct intel_pmdemand_state *old_pmdemand_state =
                intel_atomic_get_old_pmdemand_state(state);

        if (DISPLAY_VER(display) < 14)
                return;

        if (!new_pmdemand_state ||
            !intel_pmdemand_state_changed(new_pmdemand_state,
                                          old_pmdemand_state))
                return;

        WARN_ON(!new_pmdemand_state->base.changed);

        intel_pmdemand_program_params(display, new_pmdemand_state, NULL,
                                      intel_atomic_global_state_is_serialized(state));
}