// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "icl_dsi.h"
#include "intel_atomic.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_cursor.h"
#include "intel_display_debugfs.h"
#include "intel_display_irq.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_pipe_crc.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc->dev);

	if (INTEL_DISPLAY_STATE_WARN(display, drm_crtc_vblank_get(crtc) == 0,
				     "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n",
				     crtc->base.id, crtc->name))
		drm_crtc_vblank_put(crtc);
}

struct intel_crtc *intel_first_crtc(struct intel_display *display)
{
	return to_intel_crtc(drm_crtc_from_index(display->drm, 0));
}

struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
				       enum pipe pipe)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc) {
		if (crtc->pipe == pipe)
			return crtc;
	}

	return NULL;
}

void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
{
	drm_crtc_wait_one_vblank(&crtc->base);
}

void intel_wait_for_vblank_if_active(struct intel_display *display,
				     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	if (crtc->active)
		intel_crtc_wait_for_next_vblank(crtc);
}

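/*
 * Read the current frame counter for the crtc: 0 when the crtc is not
 * active, the software-tracked vblank count when the platform has no
 * usable hw frame counter (max_vblank_count == 0), and the hw counter
 * otherwise.
 */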
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);

	if (!crtc->active)
		return 0;

	if (!vblank->max_vblank_count)
		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);

	return crtc->base.funcs->get_vblank_counter(&crtc->base);
}

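/*
 * Maximum value the hw frame counter can report for this crtc
 * configuration, or 0 if the software counter has to be used instead.
 */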
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	/*
	 * From Gen 11, in DSI command mode the hw frame counter will not
	 * yet have been updated at the start of TE; if we used it we would
	 * only see the update at the next TE, so switch to the sw counter
	 * instead.
	 */
	if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 |
				      I915_MODE_FLAG_DSI_USE_TE1))
		return 0;

	/*
	 * On i965gm the hardware frame counter reads
	 * zero when the TV encoder is enabled :(
	 */
	if (display->platform.i965gm &&
	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
		return 0;

	if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
		return 0xffffffff; /* full 32 bit counter */
	else if (DISPLAY_VER(display) >= 3)
		return 0xffffff; /* only 24 bits of frame count */
	else
		return 0; /* Gen2 doesn't have a hardware frame counter */
}

void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	crtc->vblank_psr_notify = intel_psr_needs_vblank_notification(crtc_state);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);

	/*
	 * Should really happen exactly when we enable the pipe
	 * but we want the frame counters in the trace, and that
	 * requires vblank support on some platforms/outputs.
	 */
	trace_intel_pipe_enable(crtc);
}

void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/*
	 * Should really happen exactly when we disable the pipe
	 * but we want the frame counters in the trace, and that
	 * requires vblank support on some platforms/outputs.
	 */
	trace_intel_pipe_disable(crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);

	crtc->vblank_psr_notify = false;

	flush_work(&display->irq.vblank_notify_work);
}

struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state;

	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);

	if (crtc_state)
		intel_crtc_state_reset(crtc_state, crtc);

	return crtc_state;
}

void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
			    struct intel_crtc *crtc)
{
	memset(crtc_state, 0, sizeof(*crtc_state));

	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);

	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
	crtc_state->master_transcoder = INVALID_TRANSCODER;
	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
	crtc_state->scaler_state.scaler_id = -1;
	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
	crtc_state->max_link_bpp_x16 = INT_MAX;
}

static struct intel_crtc *intel_crtc_alloc(void)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;

	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
	if (!crtc)
		return ERR_PTR(-ENOMEM);

	crtc_state = intel_crtc_state_alloc(crtc);
	if (!crtc_state) {
		kfree(crtc);
		return ERR_PTR(-ENOMEM);
	}

	crtc->base.state = &crtc_state->uapi;
	crtc->config = crtc_state;

	return crtc;
}

static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}

static void intel_crtc_destroy(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);

	cpu_latency_qos_remove_request(&crtc->vblank_pm_qos);

	drm_crtc_cleanup(&crtc->base);
	kfree(crtc);
}

static int intel_crtc_late_register(struct drm_crtc *crtc)
{
	intel_crtc_debugfs_add(to_intel_crtc(crtc));
	return 0;
}

#define INTEL_CRTC_FUNCS \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources, \
	.late_register = intel_crtc_late_register

static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

int intel_crtc_init(struct intel_display *display, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = DISPLAY_RUNTIME_INFO(display)->num_scalers[pipe];

	if (DISPLAY_VER(display) >= 9)
		primary = skl_universal_plane_create(display, pipe, PLANE_1);
	else
		primary = intel_primary_plane_create(display, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	intel_init_fifo_underrun_reporting(display, crtc, false);

	for_each_sprite(display, pipe, sprite) {
		struct intel_plane *plane;

		if (DISPLAY_VER(display) >= 9)
			plane = skl_universal_plane_create(display, pipe, PLANE_2 + sprite);
		else
			plane = intel_sprite_plane_create(display, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(display, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	if (HAS_GMCH(display)) {
		if (display->platform.cherryview ||
		    display->platform.valleyview ||
		    display->platform.g4x)
			funcs = &g4x_crtc_funcs;
		else if (DISPLAY_VER(display) == 4)
			funcs = &i965_crtc_funcs;
		else if (display->platform.i945gm ||
			 display->platform.i915gm)
			funcs = &i915gm_crtc_funcs;
		else if (DISPLAY_VER(display) == 3)
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (DISPLAY_VER(display) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(display->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (DISPLAY_VER(display) >= 11)
		drm_crtc_create_scaling_filter_property(&crtc->base,
							BIT(DRM_SCALING_FILTER_DEFAULT) |
							BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));

	intel_color_crtc_init(crtc);
	intel_drrs_crtc_init(crtc);
	intel_crtc_crc_init(crtc);

	cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);

	drm_WARN_ON(display->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}

int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
					   struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drm_crtc;
	struct intel_crtc *crtc;

	drm_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
	if (!drm_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drm_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

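/*
 * LUT updates that can be done neither via the DSB nor via double buffered
 * hardware have to be loaded from a vblank work item right after vblank
 * start; check whether this commit needs such a work item.
 */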
static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	return crtc_state->hw.active &&
		!crtc_state->preload_luts &&
		!intel_crtc_needs_modeset(crtc_state) &&
		(intel_crtc_needs_color_update(crtc_state) &&
		 !HAS_DOUBLE_BUFFERED_LUT(display)) &&
		!intel_color_uses_dsb(crtc_state) &&
		!crtc_state->use_dsb;
}

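/*
 * Runs from the vblank work scheduled in intel_pipe_update_end(): load the
 * LUTs and send any pending completion event for this commit.
 */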
static void intel_crtc_vblank_work(struct kthread_work *base)
{
	struct drm_vblank_work *work = to_drm_vblank_work(base);
	struct intel_crtc_state *crtc_state =
		container_of(work, typeof(*crtc_state), vblank_work);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	trace_intel_crtc_vblank_work_start(crtc);

	intel_color_load_luts(crtc_state);

	if (crtc_state->uapi.event) {
		spin_lock_irq(&crtc->base.dev->event_lock);
		drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event);
		spin_unlock_irq(&crtc->base.dev->event_lock);
		crtc_state->uapi.event = NULL;
	}

	trace_intel_crtc_vblank_work_end(crtc);
}

static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base,
			     intel_crtc_vblank_work);
	/*
	 * Interrupt latency is critical for getting the vblank
	 * work executed as early as possible during the vblank.
	 */
	cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0);
}

void intel_wait_for_vblank_workers(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!intel_crtc_needs_vblank_work(crtc_state))
			continue;

		drm_vblank_work_flush(&crtc_state->vblank_work);
		cpu_latency_qos_update_request(&crtc->vblank_pm_qos,
					       PM_QOS_DEFAULT_VALUE);
	}
}

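/*
 * Convert a duration in microseconds to a number of scanlines of the
 * adjusted mode: scanlines = usecs * crtc_clock (kHz) / (1000 * crtc_htotal),
 * rounded up.
 */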
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
			     int usecs)
{
	/* paranoia */
	if (!adjusted_mode->crtc_htotal)
		return 1;

	return DIV_ROUND_UP_ULL(mul_u32_u32(usecs, adjusted_mode->crtc_clock),
				1000 * adjusted_mode->crtc_htotal);
}

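/*
 * Convert a number of scanlines of the adjusted mode to microseconds:
 * usecs = scanlines * crtc_htotal * 1000 / crtc_clock (kHz), rounded up.
 */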
int intel_scanlines_to_usecs(const struct drm_display_mode *adjusted_mode,
			     int scanlines)
{
	/* paranoia */
	if (!adjusted_mode->crtc_clock)
		return 1;

	return DIV_ROUND_UP_ULL(mul_u32_u32(scanlines, adjusted_mode->crtc_htotal * 1000),
				adjusted_mode->crtc_clock);
}

/**
 * intel_pipe_update_start() - start update of a set of display registers
 * @state: the atomic state
 * @crtc: the crtc
 *
 * Mark the start of an update to pipe registers that should be updated
 * atomically with respect to vblank. If the next vblank happens within
 * the next 100 us, this function waits until that vblank has passed.
 *
 * After a successful call to this function, interrupts will be disabled
 * until a subsequent call to intel_pipe_update_end(). That is done to
 * avoid random delays.
 */
void intel_pipe_update_start(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_vblank_evade_ctx evade;
	int scanline;

	drm_WARN_ON(display->drm, new_crtc_state->use_dsb);

	intel_psr_lock(new_crtc_state);

	if (new_crtc_state->do_async_flip) {
		intel_crtc_prepare_vblank_event(new_crtc_state,
						&crtc->flip_done_event);
		return;
	}

	if (intel_crtc_needs_vblank_work(new_crtc_state))
		intel_crtc_vblank_work_init(new_crtc_state);

	if (state->base.legacy_cursor_update) {
		struct intel_plane *plane;
		struct intel_plane_state *old_plane_state, *new_plane_state;
		int i;

		for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
						     new_plane_state, i) {
			if (old_plane_state->uapi.crtc == &crtc->base)
				intel_plane_init_cursor_vblank_work(old_plane_state,
								    new_plane_state);
		}
	}

	intel_vblank_evade_init(old_crtc_state, new_crtc_state, &evade);

	if (drm_WARN_ON(display->drm, drm_crtc_vblank_get(&crtc->base)))
		goto irq_disable;

	/*
	 * Wait for PSR to idle out after enabling the VBL interrupts.
	 * VBL interrupts will start the PSR exit and prevent a PSR
	 * re-entry as well.
	 */
	intel_psr_wait_for_idle_locked(new_crtc_state);

	local_irq_disable();

	crtc->debug.min_vbl = evade.min;
	crtc->debug.max_vbl = evade.max;
	trace_intel_pipe_update_start(crtc);

	scanline = intel_vblank_evade(&evade);

	drm_crtc_vblank_put(&crtc->base);

	crtc->debug.scanline_start = scanline;
	crtc->debug.start_vbl_time = ktime_get();
	crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);

	trace_intel_pipe_update_vblank_evaded(crtc);
	return;

irq_disable:
	local_irq_disable();
}

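/*
 * Record how long the atomic update spent inside the vblank-evade critical
 * section: a histogram bucketed by ilog2 of the duration in 512 ns units,
 * plus min/max/sum and a count of updates that overran the evasion budget.
 */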
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
{
	u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
	unsigned int h;

	h = ilog2(delta >> 9);
	if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
		h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
	crtc->debug.vbl.times[h]++;

	crtc->debug.vbl.sum += delta;
	if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
		crtc->debug.vbl.min = delta;
	if (delta > crtc->debug.vbl.max)
		crtc->debug.vbl.max = delta;

	if (delta > 1000 * VBLANK_EVASION_TIME_US) {
		drm_dbg_kms(crtc->base.dev,
			    "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
			    pipe_name(crtc->pipe),
			    div_u64(delta, 1000),
			    VBLANK_EVASION_TIME_US);
		crtc->debug.vbl.over++;
	}
}
#else
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
#endif

void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned long irqflags;

	if (!crtc_state->uapi.event)
		return;

	drm_WARN_ON(crtc->base.dev, drm_crtc_vblank_get(&crtc->base) != 0);

	spin_lock_irqsave(&crtc->base.dev->event_lock, irqflags);
	drm_crtc_arm_vblank_event(&crtc->base, crtc_state->uapi.event);
	spin_unlock_irqrestore(&crtc->base.dev->event_lock, irqflags);

	crtc_state->uapi.event = NULL;
}

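/*
 * Detach the uapi completion event from the crtc state so the caller can
 * send it itself later, as done for the async flip path in
 * intel_pipe_update_start().
 */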
void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
				     struct drm_pending_vblank_event **event)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned long irqflags;

	spin_lock_irqsave(&crtc->base.dev->event_lock, irqflags);
	*event = crtc_state->uapi.event;
	spin_unlock_irqrestore(&crtc->base.dev->event_lock, irqflags);

	crtc_state->uapi.event = NULL;
}

/**
 * intel_pipe_update_end() - end update of a set of display registers
 * @state: the atomic state
 * @crtc: the crtc
 *
 * Mark the end of an update started with intel_pipe_update_start(). This
 * re-enables interrupts and verifies the update was actually completed
 * before a vblank.
 */
void intel_pipe_update_end(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;
	int scanline_end = intel_get_crtc_scanline(crtc);
	u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
	ktime_t end_vbl_time = ktime_get();
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	drm_WARN_ON(display->drm, new_crtc_state->use_dsb);

	if (new_crtc_state->do_async_flip)
		goto out;

	trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);

	/*
	 * In case of MIPI DSI command mode, we need to set the frame update
	 * request for every commit.
	 */
	if (DISPLAY_VER(display) >= 11 &&
	    intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
		icl_dsi_frame_update(new_crtc_state);

	/*
	 * We're still in the vblank-evade critical section, this can't race.
	 * Would be slightly nice to just grab the vblank count and arm the
	 * event outside of the critical section - the spinlock might spin for a
	 * while ...
	 */
	if (intel_crtc_needs_vblank_work(new_crtc_state)) {
		drm_vblank_work_schedule(&new_crtc_state->vblank_work,
					 drm_crtc_accurate_vblank_count(&crtc->base) + 1,
					 false);
	} else {
		intel_crtc_arm_vblank_event(new_crtc_state);
	}

	if (state->base.legacy_cursor_update) {
		struct intel_plane *plane;
		struct intel_plane_state *old_plane_state;
		int i;

		for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
			if (old_plane_state->uapi.crtc == &crtc->base &&
			    old_plane_state->unpin_work.vblank) {
				drm_vblank_work_schedule(&old_plane_state->unpin_work,
							 drm_crtc_accurate_vblank_count(&crtc->base) + 1,
							 false);

				/* Remove plane from atomic state, cleanup/free is done from vblank worker. */
				memset(&state->base.planes[i], 0, sizeof(state->base.planes[i]));
			}
		}
	}

	/*
	 * Send VRR Push to terminate Vblank. If we are already in vblank
	 * this has to be done _after_ sampling the frame counter, as
	 * otherwise the push would immediately terminate the vblank and
	 * the sampled frame counter would correspond to the next frame
	 * instead of the current frame.
	 *
	 * There is a tiny race here (iff vblank evasion failed us) where
	 * we might sample the frame counter just before vmax vblank start
	 * but the push would be sent just after it. That would cause the
	 * push to affect the next frame instead of the current frame,
	 * which would cause the next frame to terminate already at vmin
	 * vblank start instead of vmax vblank start.
	 */
	if (!state->base.legacy_cursor_update)
		intel_vrr_send_push(NULL, new_crtc_state);

	local_irq_enable();

	if (intel_vgpu_active(dev_priv))
		goto out;

	if (crtc->debug.start_vbl_count &&
	    crtc->debug.start_vbl_count != end_vbl_count) {
		drm_err(display->drm,
			"Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
			pipe_name(pipe), crtc->debug.start_vbl_count,
			end_vbl_count,
			ktime_us_delta(end_vbl_time,
				       crtc->debug.start_vbl_time),
			crtc->debug.min_vbl, crtc->debug.max_vbl,
			crtc->debug.scanline_start, scanline_end);
	}

	dbg_vblank_evade(crtc, end_vbl_time);

out:
	intel_psr_unlock(new_crtc_state);
}