// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/pci.h>
#include <linux/string.h>

#include <drm/drm_print.h>

#include "intel_atomic.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_global_state.h"

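/*
 * Iterators over the global objects in an atomic state, modeled after
 * the for_each_*_obj_in_state() helpers in drm_atomic.h: walk every
 * global object added to @__state and hand back its old and/or new
 * object state together with the loop index.
 */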
#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
	for ((__i) = 0; \
	     (__i) < (__state)->num_global_objs && \
	     ((obj) = (__state)->global_objs[__i].ptr, \
	      (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
	     (__i)++) \
		for_each_if(obj)

#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
	for ((__i) = 0; \
	     (__i) < (__state)->num_global_objs && \
	     ((obj) = (__state)->global_objs[__i].ptr, \
	      (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
	     (__i)++) \
		for_each_if(obj)

#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
	for ((__i) = 0; \
	     (__i) < (__state)->num_global_objs && \
	     ((obj) = (__state)->global_objs[__i].ptr, \
	      (old_obj_state) = (__state)->global_objs[__i].old_state, \
	      (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
	     (__i)++) \
		for_each_if(obj)

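/*
 * Bookkeeping for one global object within an atomic state: the object
 * itself plus its duplicated state and the old/new state pointers that
 * intel_atomic_swap_global_state() shuffles on commit.
 */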
struct intel_global_objs_state {
	struct intel_global_obj *ptr;
	struct intel_global_state *state, *old_state, *new_state;
};

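/*
 * Reference counted completion, signaled once the commit that
 * serialized some global state has finished reprogramming the
 * hardware. Later commits wait on it before touching the same state.
 */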
struct intel_global_commit {
	struct kref ref;
	struct completion done;
};

static struct intel_global_commit *commit_new(void)
{
	struct intel_global_commit *commit;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit)
		return NULL;

	init_completion(&commit->done);
	kref_init(&commit->ref);

	return commit;
}

static void __commit_free(struct kref *kref)
{
	struct intel_global_commit *commit =
		container_of(kref, typeof(*commit), ref);

	kfree(commit);
}

static struct intel_global_commit *commit_get(struct intel_global_commit *commit)
{
	if (commit)
		kref_get(&commit->ref);

	return commit;
}

static void commit_put(struct intel_global_commit *commit)
{
	if (commit)
		kref_put(&commit->ref, __commit_free);
}

static void __intel_atomic_global_state_free(struct kref *kref)
{
	struct intel_global_state *obj_state =
		container_of(kref, struct intel_global_state, ref);
	struct intel_global_obj *obj = obj_state->obj;

	commit_put(obj_state->commit);

	obj->funcs->atomic_destroy_state(obj, obj_state);
}

static void intel_atomic_global_state_put(struct intel_global_state *obj_state)
{
	kref_put(&obj_state->ref, __intel_atomic_global_state_free);
}

static struct intel_global_state *
intel_atomic_global_state_get(struct intel_global_state *obj_state)
{
	kref_get(&obj_state->ref);

	return obj_state;
}

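/*
 * Register @obj with the display core and install @state as its initial
 * object state. The object stays on display->global.obj_list, and its
 * current state remains reference counted, until
 * intel_atomic_global_obj_cleanup() tears it down.
 */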
void intel_atomic_global_obj_init(struct intel_display *display,
				  struct intel_global_obj *obj,
				  struct intel_global_state *state,
				  const struct intel_global_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	state->obj = obj;

	kref_init(&state->ref);

	obj->state = state;
	obj->funcs = funcs;
	list_add_tail(&obj->head, &display->global.obj_list);
}

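/*
 * Unregister all global objects and drop the final reference to their
 * current states. Warns if anyone else still holds a reference.
 */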
void intel_atomic_global_obj_cleanup(struct intel_display *display)
{
	struct intel_global_obj *obj, *next;

	list_for_each_entry_safe(obj, next, &display->global.obj_list, head) {
		list_del(&obj->head);

		drm_WARN_ON(display->drm, kref_read(&obj->state->ref) != 1);
		intel_atomic_global_state_put(obj->state);
	}
}

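/*
 * Global state is protected by the crtc locks: holding all crtc locks
 * grants write access, holding any single crtc lock grants read access.
 * These asserts codify that scheme.
 */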
static void assert_global_state_write_locked(struct intel_display *display)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc)
		drm_modeset_lock_assert_held(&crtc->base.mutex);
}

static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
				 struct drm_modeset_lock *lock)
{
	struct drm_modeset_lock *l;

	list_for_each_entry(l, &ctx->locked, head) {
		if (lock == l)
			return true;
	}

	return false;
}

static void assert_global_state_read_locked(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc) {
		if (modeset_lock_is_held(ctx, &crtc->base.mutex))
			return;
	}

	drm_WARN(display->drm, 1, "Global state not read locked\n");
}

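/*
 * Add @obj's state to @state, duplicating the object's current state on
 * first access; subsequent calls within the same atomic state return the
 * already duplicated copy. Requires at least read access (any crtc lock).
 *
 * A typical driver-side getter is a thin wrapper around this (a sketch,
 * assuming a hypothetical "foo" global object and its to_intel_foo_state()
 * cast helper):
 *
 *	struct intel_foo_state *
 *	intel_atomic_get_foo_state(struct intel_atomic_state *state)
 *	{
 *		struct intel_display *display = to_intel_display(state);
 *		struct intel_global_state *obj_state;
 *
 *		obj_state = intel_atomic_get_global_obj_state(state,
 *							      &display->foo.obj);
 *		if (IS_ERR(obj_state))
 *			return ERR_CAST(obj_state);
 *
 *		return to_intel_foo_state(obj_state);
 *	}
 */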
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
				  struct intel_global_obj *obj)
{
	struct intel_display *display = to_intel_display(state);
	int index, num_objs, i;
	size_t size;
	struct intel_global_objs_state *arr;
	struct intel_global_state *obj_state;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].state;

	assert_global_state_read_locked(state);

	num_objs = state->num_global_objs + 1;
	size = sizeof(*state->global_objs) * num_objs;
	arr = krealloc(state->global_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->global_objs = arr;
	index = state->num_global_objs;
	memset(&state->global_objs[index], 0, sizeof(*state->global_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	obj_state->obj = obj;
	obj_state->changed = false;
	obj_state->serialized = false;
	obj_state->commit = NULL;

	kref_init(&obj_state->ref);

	state->global_objs[index].state = obj_state;
	state->global_objs[index].old_state =
		intel_atomic_global_state_get(obj->state);
	state->global_objs[index].new_state = obj_state;
	state->global_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_global_objs = num_objs;

	drm_dbg_atomic(display->drm, "Added new global object %p state %p to %p\n",
		       obj, obj_state, state);

	return obj_state;
}

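/*
 * Return the old state for @obj, or NULL if it is not part of @state.
 */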
struct intel_global_state *
intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
				      struct intel_global_obj *obj)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].old_state;

	return NULL;
}

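/*
 * Return the new state for @obj, or NULL if it is not part of @state.
 */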
struct intel_global_state *
intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
				      struct intel_global_obj *obj)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].new_state;

	return NULL;
}

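/*
 * Swap the modified new states into the global objects, mirroring what
 * drm_atomic_helper_swap_state() does for the core object types. New
 * states that were never modified (and write locked) are not swapped in.
 */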
void intel_atomic_swap_global_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *old_obj_state, *new_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
					    new_obj_state, i) {
		drm_WARN_ON(display->drm, obj->state != old_obj_state);

		/*
		 * If the new state wasn't modified (and properly
		 * locked for write access) we throw it away.
		 */
		if (!new_obj_state->changed)
			continue;

		assert_global_state_write_locked(display);

		old_obj_state->state = state;
		new_obj_state->state = NULL;

		state->global_objs[i].state = old_obj_state;

		intel_atomic_global_state_put(obj->state);
		obj->state = intel_atomic_global_state_get(new_obj_state);
	}
}

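/*
 * Drop the references to all old/new global object states and forget
 * about them; used when destroying or clearing an atomic state.
 */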
void intel_atomic_clear_global_state(struct intel_atomic_state *state)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++) {
		intel_atomic_global_state_put(state->global_objs[i].old_state);
		intel_atomic_global_state_put(state->global_objs[i].new_state);

		state->global_objs[i].ptr = NULL;
		state->global_objs[i].state = NULL;
		state->global_objs[i].old_state = NULL;
		state->global_objs[i].new_state = NULL;
	}
	state->num_global_objs = 0;
}

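/*
 * Grab write access to @obj_state by locking every crtc, and mark the
 * state as changed so intel_atomic_swap_global_state() will swap it in.
 */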
int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
{
	struct intel_atomic_state *state = obj_state->state;
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc) {
		int ret;

		ret = drm_modeset_lock(&crtc->base.mutex,
				       state->base.acquire_ctx);
		if (ret)
			return ret;
	}

	obj_state->changed = true;

	return 0;
}

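/*
 * Like intel_atomic_lock_global_state(), but additionally mark the state
 * as serialized: a commit touching it must complete on the hardware
 * before a later commit may consume the same state (see
 * intel_atomic_global_state_setup_commit() and
 * intel_atomic_global_state_wait_for_dependencies()).
 */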
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
{
	int ret;

	ret = intel_atomic_lock_global_state(obj_state);
	if (ret)
		return ret;

	obj_state->serialized = true;

	return 0;
}

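/*
 * The global state is fully serialized when every crtc is part of the
 * atomic state, i.e. no other commit can run concurrently against it.
 */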
bool
intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc)
		if (!intel_atomic_get_new_crtc_state(state, crtc))
			return false;

	return true;
}

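/*
 * Attach a commit tracker to every new global object state: a fresh one
 * for serialized states (completed once the hardware has been
 * reprogrammed), or the carried-over previous commit for merely changed
 * states, in case that commit is still in flight.
 */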
int
intel_atomic_global_state_setup_commit(struct intel_atomic_state *state)
{
	const struct intel_global_state *old_obj_state;
	struct intel_global_state *new_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
					    new_obj_state, i) {
		struct intel_global_commit *commit = NULL;

		if (new_obj_state->serialized) {
			/*
			 * New commit which is going to be completed
			 * after the hardware reprogramming is done.
			 */
			commit = commit_new();
			if (!commit)
				return -ENOMEM;
		} else if (new_obj_state->changed) {
			/*
			 * We're going to swap to this state, so carry the
			 * previous commit along, in case it's not yet done.
			 */
			commit = commit_get(old_obj_state->commit);
		}

		new_obj_state->commit = commit;
	}

	return 0;
}

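/*
 * Wait for any previous commit that serialized our old global object
 * states to finish its hardware reprogramming, with a 10 second timeout
 * per object.
 */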
int
intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_global_state *old_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_old_global_obj_in_state(state, obj, old_obj_state, i) {
		struct intel_global_commit *commit = old_obj_state->commit;
		long ret;

		if (!commit)
			continue;

		ret = wait_for_completion_timeout(&commit->done, 10 * HZ);
		if (ret == 0) {
			drm_err(display->drm, "global state timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

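/*
 * Signal completion of the commit for every serialized global object
 * state, unblocking any commit waiting in
 * intel_atomic_global_state_wait_for_dependencies().
 */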
void
intel_atomic_global_state_commit_done(struct intel_atomic_state *state)
{
	const struct intel_global_state *new_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_new_global_obj_in_state(state, obj, new_obj_state, i) {
		struct intel_global_commit *commit = new_obj_state->commit;

		if (!new_obj_state->serialized)
			continue;

		complete_all(&commit->done);
	}
}