blob: 192435c77d2b69be04570253070a02c0a6d99416 [file] [log] [blame]
Daniel Stone4c2fc702019-06-18 11:12:07 +01001/*
2 * Copyright © 2008-2011 Kristian Høgsberg
3 * Copyright © 2011 Intel Corporation
4 * Copyright © 2017, 2018 Collabora, Ltd.
5 * Copyright © 2017, 2018 General Electric Company
6 * Copyright (c) 2018 DisplayLink (UK) Ltd.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining
9 * a copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sublicense, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial
18 * portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 * SOFTWARE.
28 */
29
30#include "config.h"
31
32#include <stdint.h>
33
34#include <xf86drm.h>
35#include <xf86drmMode.h>
36#include <drm_fourcc.h>
37
Daniel Stone4c2fc702019-06-18 11:12:07 +010038#include <libweston/libweston.h>
39#include <libweston/backend-drm.h>
40#include "shared/helpers.h"
41#include "drm-internal.h"
42#include "pixel-formats.h"
43#include "presentation-time-server-protocol.h"
44
Tomohito Esaki29beeaf2019-06-24 17:23:44 +090045#ifndef DRM_FORMAT_MOD_LINEAR
46#define DRM_FORMAT_MOD_LINEAR 0
47#endif
48
/* Human-readable names for the KMS plane "type" enum property; the
 * kernel's runtime values are filled in by drm_property_info_populate(). */
struct drm_property_enum_info plane_type_enums[] = {
	[WDRM_PLANE_TYPE_PRIMARY] = {
		.name = "Primary",
	},
	[WDRM_PLANE_TYPE_OVERLAY] = {
		.name = "Overlay",
	},
	[WDRM_PLANE_TYPE_CURSOR] = {
		.name = "Cursor",
	},
};
60
/* Template describing every KMS plane property we care about, indexed by
 * enum wdrm_plane_property. Property IDs and enum values are resolved at
 * runtime by drm_property_info_populate(). */
const struct drm_property_info plane_props[] = {
	[WDRM_PLANE_TYPE] = {
		.name = "type",
		.enum_values = plane_type_enums,
		.num_enum_values = WDRM_PLANE_TYPE__COUNT,
	},
	[WDRM_PLANE_SRC_X] = { .name = "SRC_X", },
	[WDRM_PLANE_SRC_Y] = { .name = "SRC_Y", },
	[WDRM_PLANE_SRC_W] = { .name = "SRC_W", },
	[WDRM_PLANE_SRC_H] = { .name = "SRC_H", },
	[WDRM_PLANE_CRTC_X] = { .name = "CRTC_X", },
	[WDRM_PLANE_CRTC_Y] = { .name = "CRTC_Y", },
	[WDRM_PLANE_CRTC_W] = { .name = "CRTC_W", },
	[WDRM_PLANE_CRTC_H] = { .name = "CRTC_H", },
	[WDRM_PLANE_FB_ID] = { .name = "FB_ID", },
	[WDRM_PLANE_CRTC_ID] = { .name = "CRTC_ID", },
	[WDRM_PLANE_IN_FORMATS] = { .name = "IN_FORMATS" },
	[WDRM_PLANE_IN_FENCE_FD] = { .name = "IN_FENCE_FD" },
	[WDRM_PLANE_FB_DAMAGE_CLIPS] = { .name = "FB_DAMAGE_CLIPS" },
	[WDRM_PLANE_ZPOS] = { .name = "zpos" },
};
82
/* Names of the connector DPMS property's enum values. */
struct drm_property_enum_info dpms_state_enums[] = {
	[WDRM_DPMS_STATE_OFF] = {
		.name = "Off",
	},
	[WDRM_DPMS_STATE_ON] = {
		.name = "On",
	},
	[WDRM_DPMS_STATE_STANDBY] = {
		.name = "Standby",
	},
	[WDRM_DPMS_STATE_SUSPEND] = {
		.name = "Suspend",
	},
};
97
/* Names of the "Content Protection" (HDCP) connector property's enum
 * values. */
struct drm_property_enum_info content_protection_enums[] = {
	[WDRM_CONTENT_PROTECTION_UNDESIRED] = {
		.name = "Undesired",
	},
	[WDRM_CONTENT_PROTECTION_DESIRED] = {
		.name = "Desired",
	},
	[WDRM_CONTENT_PROTECTION_ENABLED] = {
		.name = "Enabled",
	},
};
109
/* Names of the "HDCP Content Type" connector property's enum values. */
struct drm_property_enum_info hdcp_content_type_enums[] = {
	[WDRM_HDCP_CONTENT_TYPE0] = {
		.name = "HDCP Type0",
	},
	[WDRM_HDCP_CONTENT_TYPE1] = {
		.name = "HDCP Type1",
	},
};
118
/* Template describing every KMS connector property we care about, indexed
 * by enum wdrm_connector_property; resolved at runtime by
 * drm_property_info_populate(). */
const struct drm_property_info connector_props[] = {
	[WDRM_CONNECTOR_EDID] = { .name = "EDID" },
	[WDRM_CONNECTOR_DPMS] = {
		.name = "DPMS",
		.enum_values = dpms_state_enums,
		.num_enum_values = WDRM_DPMS_STATE__COUNT,
	},
	[WDRM_CONNECTOR_CRTC_ID] = { .name = "CRTC_ID", },
	[WDRM_CONNECTOR_NON_DESKTOP] = { .name = "non-desktop", },
	[WDRM_CONNECTOR_CONTENT_PROTECTION] = {
		.name = "Content Protection",
		.enum_values = content_protection_enums,
		.num_enum_values = WDRM_CONTENT_PROTECTION__COUNT,
	},
	[WDRM_CONNECTOR_HDCP_CONTENT_TYPE] = {
		.name = "HDCP Content Type",
		.enum_values = hdcp_content_type_enums,
		.num_enum_values = WDRM_HDCP_CONTENT_TYPE__COUNT,
	},
};
139
/* Template describing the KMS CRTC properties we use, indexed by
 * enum wdrm_crtc_property. */
const struct drm_property_info crtc_props[] = {
	[WDRM_CRTC_MODE_ID] = { .name = "MODE_ID", },
	[WDRM_CRTC_ACTIVE] = { .name = "ACTIVE", },
};
144
145
/**
 * Mode for drm_pending_state_apply and co.
 *
 * Distinguishes whether a state application completes before returning
 * (SYNC), completes later via kernel events (ASYNC), or is only tested
 * for validity without being committed (TEST_ONLY).
 */
enum drm_state_apply_mode {
	DRM_STATE_APPLY_SYNC, /**< state fully processed */
	DRM_STATE_APPLY_ASYNC, /**< state pending event delivery */
	DRM_STATE_TEST_ONLY, /**< test if the state can be applied */
};
154
155/**
156 * Get the current value of a KMS property
157 *
158 * Given a drmModeObjectGetProperties return, as well as the drm_property_info
159 * for the target property, return the current value of that property,
160 * with an optional default. If the property is a KMS enum type, the return
161 * value will be translated into the appropriate internal enum.
162 *
163 * If the property is not present, the default value will be returned.
164 *
165 * @param info Internal structure for property to look up
166 * @param props Raw KMS properties for the target object
167 * @param def Value to return if property is not found
168 */
169uint64_t
170drm_property_get_value(struct drm_property_info *info,
171 const drmModeObjectProperties *props,
172 uint64_t def)
173{
174 unsigned int i;
175
176 if (info->prop_id == 0)
177 return def;
178
179 for (i = 0; i < props->count_props; i++) {
180 unsigned int j;
181
182 if (props->props[i] != info->prop_id)
183 continue;
184
185 /* Simple (non-enum) types can return the value directly */
186 if (info->num_enum_values == 0)
187 return props->prop_values[i];
188
189 /* Map from raw value to enum value */
190 for (j = 0; j < info->num_enum_values; j++) {
191 if (!info->enum_values[j].valid)
192 continue;
193 if (info->enum_values[j].value != props->prop_values[i])
194 continue;
195
196 return j;
197 }
198
199 /* We don't have a mapping for this enum; return default. */
200 break;
201 }
202
203 return def;
204}
205
206/**
Marius Vlad1accffe2019-11-01 12:00:09 +0200207 * Get the current range values of a KMS property
208 *
209 * Given a drmModeObjectGetProperties return, as well as the drm_property_info
210 * for the target property, return the current range values of that property,
211 *
212 * If the property is not present, or there's no it is not a prop range then
213 * NULL will be returned.
214 *
215 * @param info Internal structure for property to look up
216 * @param props Raw KMS properties for the target object
217 */
218uint64_t *
219drm_property_get_range_values(struct drm_property_info *info,
220 const drmModeObjectProperties *props)
221{
222 unsigned int i;
223
224 if (info->prop_id == 0)
225 return NULL;
226
227 for (i = 0; i < props->count_props; i++) {
228
229 if (props->props[i] != info->prop_id)
230 continue;
231
232 if (!(info->flags & DRM_MODE_PROP_RANGE) &&
233 !(info->flags & DRM_MODE_PROP_SIGNED_RANGE))
234 continue;
235
236 return info->range_values;
237 }
238
239 return NULL;
240}
241
242/**
Daniel Stone4c2fc702019-06-18 11:12:07 +0100243 * Cache DRM property values
244 *
245 * Update a per-object array of drm_property_info structures, given the
246 * DRM properties of the object.
247 *
248 * Call this every time an object newly appears (note that only connectors
249 * can be hotplugged), the first time it is seen, or when its status changes
250 * in a way which invalidates the potential property values (currently, the
251 * only case for this is connector hotplug).
252 *
253 * This updates the property IDs and enum values within the drm_property_info
254 * array.
255 *
256 * DRM property enum values are dynamic at runtime; the user must query the
257 * property to find out the desired runtime value for a requested string
258 * name. Using the 'type' field on planes as an example, there is no single
259 * hardcoded constant for primary plane types; instead, the property must be
260 * queried at runtime to find the value associated with the string "Primary".
261 *
262 * This helper queries and caches the enum values, to allow us to use a set
263 * of compile-time-constant enums portably across various implementations.
264 * The values given in enum_names are searched for, and stored in the
265 * same-indexed field of the map array.
266 *
267 * @param b DRM backend object
268 * @param src DRM property info array to source from
269 * @param info DRM property info array to copy into
270 * @param num_infos Number of entries in the source array
271 * @param props DRM object properties for the object
272 */
273void
274drm_property_info_populate(struct drm_backend *b,
275 const struct drm_property_info *src,
276 struct drm_property_info *info,
277 unsigned int num_infos,
278 drmModeObjectProperties *props)
279{
280 drmModePropertyRes *prop;
281 unsigned i, j;
282
283 for (i = 0; i < num_infos; i++) {
284 unsigned int j;
285
286 info[i].name = src[i].name;
287 info[i].prop_id = 0;
288 info[i].num_enum_values = src[i].num_enum_values;
289
290 if (src[i].num_enum_values == 0)
291 continue;
292
293 info[i].enum_values =
294 malloc(src[i].num_enum_values *
295 sizeof(*info[i].enum_values));
296 assert(info[i].enum_values);
297 for (j = 0; j < info[i].num_enum_values; j++) {
298 info[i].enum_values[j].name = src[i].enum_values[j].name;
299 info[i].enum_values[j].valid = false;
300 }
301 }
302
303 for (i = 0; i < props->count_props; i++) {
304 unsigned int k;
305
306 prop = drmModeGetProperty(b->drm.fd, props->props[i]);
307 if (!prop)
308 continue;
309
310 for (j = 0; j < num_infos; j++) {
311 if (!strcmp(prop->name, info[j].name))
312 break;
313 }
314
315 /* We don't know/care about this property. */
316 if (j == num_infos) {
317#ifdef DEBUG
318 weston_log("DRM debug: unrecognized property %u '%s'\n",
319 prop->prop_id, prop->name);
320#endif
321 drmModeFreeProperty(prop);
322 continue;
323 }
324
325 if (info[j].num_enum_values == 0 &&
326 (prop->flags & DRM_MODE_PROP_ENUM)) {
327 weston_log("DRM: expected property %s to not be an"
328 " enum, but it is; ignoring\n", prop->name);
329 drmModeFreeProperty(prop);
330 continue;
331 }
332
333 info[j].prop_id = props->props[i];
Marius Vlad1accffe2019-11-01 12:00:09 +0200334 info[j].flags = prop->flags;
335
336 if (prop->flags & DRM_MODE_PROP_RANGE ||
337 prop->flags & DRM_MODE_PROP_SIGNED_RANGE) {
338 info[j].num_range_values = prop->count_values;
339 for (int i = 0; i < prop->count_values; i++)
340 info[j].range_values[i] = prop->values[i];
341 }
342
Daniel Stone4c2fc702019-06-18 11:12:07 +0100343
344 if (info[j].num_enum_values == 0) {
345 drmModeFreeProperty(prop);
346 continue;
347 }
348
349 if (!(prop->flags & DRM_MODE_PROP_ENUM)) {
350 weston_log("DRM: expected property %s to be an enum,"
351 " but it is not; ignoring\n", prop->name);
352 drmModeFreeProperty(prop);
353 info[j].prop_id = 0;
354 continue;
355 }
356
357 for (k = 0; k < info[j].num_enum_values; k++) {
358 int l;
359
360 for (l = 0; l < prop->count_enums; l++) {
361 if (!strcmp(prop->enums[l].name,
362 info[j].enum_values[k].name))
363 break;
364 }
365
366 if (l == prop->count_enums)
367 continue;
368
369 info[j].enum_values[k].valid = true;
370 info[j].enum_values[k].value = prop->enums[l].value;
371 }
372
373 drmModeFreeProperty(prop);
374 }
375
376#ifdef DEBUG
377 for (i = 0; i < num_infos; i++) {
378 if (info[i].prop_id == 0)
379 weston_log("DRM warning: property '%s' missing\n",
380 info[i].name);
381 }
382#endif
383}
384
385/**
386 * Free DRM property information
387 *
388 * Frees all memory associated with a DRM property info array and zeroes
389 * it out, leaving it usable for a further drm_property_info_update() or
390 * drm_property_info_free().
391 *
392 * @param info DRM property info array
393 * @param num_props Number of entries in array to free
394 */
395void
396drm_property_info_free(struct drm_property_info *info, int num_props)
397{
398 int i;
399
400 for (i = 0; i < num_props; i++)
401 free(info[i].enum_values);
402
403 memset(info, 0, sizeof(*info) * num_props);
404}
405
/* Return a pointer to the format array inside an IN_FORMATS blob, using
 * the offset the kernel stored in the blob header. */
static inline uint32_t *
formats_ptr(struct drm_format_modifier_blob *blob)
{
	return (uint32_t *)(((char *)blob) + blob->formats_offset);
}
411
/* Return a pointer to the modifier array inside an IN_FORMATS blob, using
 * the offset the kernel stored in the blob header. */
static inline struct drm_format_modifier *
modifiers_ptr(struct drm_format_modifier_blob *blob)
{
	return (struct drm_format_modifier *)
	       (((char *)blob) + blob->modifiers_offset);
}
Daniel Stone4c2fc702019-06-18 11:12:07 +0100418
419/**
420 * Populates the plane's formats array, using either the IN_FORMATS blob
421 * property (if available), or the plane's format list if not.
422 */
423int
424drm_plane_populate_formats(struct drm_plane *plane, const drmModePlane *kplane,
425 const drmModeObjectProperties *props)
426{
427 unsigned i;
Daniel Stone4c2fc702019-06-18 11:12:07 +0100428 drmModePropertyBlobRes *blob;
429 struct drm_format_modifier_blob *fmt_mod_blob;
430 struct drm_format_modifier *blob_modifiers;
431 uint32_t *blob_formats;
432 uint32_t blob_id;
433
434 blob_id = drm_property_get_value(&plane->props[WDRM_PLANE_IN_FORMATS],
435 props,
436 0);
437 if (blob_id == 0)
438 goto fallback;
439
440 blob = drmModeGetPropertyBlob(plane->backend->drm.fd, blob_id);
441 if (!blob)
442 goto fallback;
443
444 fmt_mod_blob = blob->data;
445 blob_formats = formats_ptr(fmt_mod_blob);
446 blob_modifiers = modifiers_ptr(fmt_mod_blob);
447
448 if (plane->count_formats != fmt_mod_blob->count_formats) {
449 weston_log("DRM backend: format count differs between "
450 "plane (%d) and IN_FORMATS (%d)\n",
451 plane->count_formats,
452 fmt_mod_blob->count_formats);
453 weston_log("This represents a kernel bug; Weston is "
454 "unable to continue.\n");
455 abort();
456 }
457
458 for (i = 0; i < fmt_mod_blob->count_formats; i++) {
459 uint32_t count_modifiers = 0;
460 uint64_t *modifiers = NULL;
461 unsigned j;
462
463 for (j = 0; j < fmt_mod_blob->count_modifiers; j++) {
464 struct drm_format_modifier *mod = &blob_modifiers[j];
465
466 if ((i < mod->offset) || (i > mod->offset + 63))
467 continue;
468 if (!(mod->formats & (1 << (i - mod->offset))))
469 continue;
470
471 modifiers = realloc(modifiers,
472 (count_modifiers + 1) *
473 sizeof(modifiers[0]));
474 assert(modifiers);
475 modifiers[count_modifiers++] = mod->modifier;
476 }
477
Tomohito Esaki29beeaf2019-06-24 17:23:44 +0900478 if (count_modifiers == 0) {
479 modifiers = malloc(sizeof(*modifiers));
480 *modifiers = DRM_FORMAT_MOD_LINEAR;
481 count_modifiers = 1;
482 }
483
Daniel Stone4c2fc702019-06-18 11:12:07 +0100484 plane->formats[i].format = blob_formats[i];
485 plane->formats[i].modifiers = modifiers;
486 plane->formats[i].count_modifiers = count_modifiers;
487 }
488
489 drmModeFreePropertyBlob(blob);
490
491 return 0;
492
493fallback:
Daniel Stone4c2fc702019-06-18 11:12:07 +0100494 /* No IN_FORMATS blob available, so just use the old. */
495 assert(plane->count_formats == kplane->count_formats);
Tomohito Esaki29beeaf2019-06-24 17:23:44 +0900496 for (i = 0; i < kplane->count_formats; i++) {
Daniel Stone4c2fc702019-06-18 11:12:07 +0100497 plane->formats[i].format = kplane->formats[i];
Tomohito Esaki29beeaf2019-06-24 17:23:44 +0900498 plane->formats[i].modifiers = malloc(sizeof(uint64_t));
499 plane->formats[i].modifiers[0] = DRM_FORMAT_MOD_LINEAR;
500 plane->formats[i].count_modifiers = 1;
501 }
Daniel Stone4c2fc702019-06-18 11:12:07 +0100502
503 return 0;
504}
505
506void
507drm_output_set_gamma(struct weston_output *output_base,
508 uint16_t size, uint16_t *r, uint16_t *g, uint16_t *b)
509{
510 int rc;
511 struct drm_output *output = to_drm_output(output_base);
512 struct drm_backend *backend =
513 to_drm_backend(output->base.compositor);
514
515 /* check */
516 if (output_base->gamma_size != size)
517 return;
518
519 rc = drmModeCrtcSetGamma(backend->drm.fd,
520 output->crtc_id,
521 size, r, g, b);
522 if (rc)
523 weston_log("set gamma failed: %s\n", strerror(errno));
524}
525
/**
 * Mark an output state as current on the output, i.e. it has been
 * submitted to the kernel. The mode argument determines whether this
 * update will be applied synchronously (e.g. when calling drmModeSetCrtc),
 * or asynchronously (in which case we wait for events to complete).
 */
static void
drm_output_assign_state(struct drm_output_state *state,
			enum drm_state_apply_mode mode)
{
	struct drm_output *output = state->output;
	struct drm_backend *b = to_drm_backend(output->base.compositor);
	struct drm_plane_state *plane_state;
	struct drm_head *head;

	assert(!output->state_last);

	/* Async: keep the outgoing state alive in state_last until the
	 * kernel's completion event; sync: it can be freed right away. */
	if (mode == DRM_STATE_APPLY_ASYNC)
		output->state_last = output->state_cur;
	else
		drm_output_state_free(output->state_cur);

	/* Detach the new state from its pending_state container. */
	wl_list_remove(&state->link);
	wl_list_init(&state->link);
	state->pending_state = NULL;

	output->state_cur = state;

	if (b->atomic_modeset && mode == DRM_STATE_APPLY_ASYNC) {
		drm_debug(b, "\t[CRTC:%u] setting pending flip\n", output->crtc_id);
		output->atomic_complete_pending = true;
	}

	/* An HDCP-disable submitted atomically takes effect immediately,
	 * so report the status change on every attached head now. */
	if (b->atomic_modeset &&
	    state->protection == WESTON_HDCP_DISABLE)
		wl_list_for_each(head, &output->base.head_list, base.output_link)
			weston_head_set_content_protection_status(&head->base,
								  WESTON_HDCP_DISABLE);

	/* Replace state_cur on each affected plane with the new state, being
	 * careful to dispose of orphaned (but only orphaned) previous state.
	 * If the previous state is not orphaned (still has an output_state
	 * attached), it will be disposed of by freeing the output_state. */
	wl_list_for_each(plane_state, &state->plane_list, link) {
		struct drm_plane *plane = plane_state->plane;

		if (plane->state_cur && !plane->state_cur->output_state)
			drm_plane_state_free(plane->state_cur, true);
		plane->state_cur = plane_state;

		if (mode != DRM_STATE_APPLY_ASYNC) {
			plane_state->complete = true;
			continue;
		}

		if (b->atomic_modeset)
			continue;

		/* Legacy path: only the primary plane's flip generates a
		 * completion event; overlays are not supported here. */
		assert(plane->type != WDRM_PLANE_TYPE_OVERLAY);
		if (plane->type == WDRM_PLANE_TYPE_PRIMARY)
			output->page_flip_pending = true;
	}
}
589
/* Apply the cursor plane's state via the legacy drmModeSetCursor /
 * drmModeMoveCursor calls. On any kernel failure, marks cursors as broken
 * on the backend and hides the cursor. No-op when the output has no cursor
 * plane or the state does not touch it. */
static void
drm_output_set_cursor(struct drm_output_state *output_state)
{
	struct drm_output *output = output_state->output;
	struct drm_backend *b = to_drm_backend(output->base.compositor);
	struct drm_plane *plane = output->cursor_plane;
	struct drm_plane_state *state;
	uint32_t handle;

	if (!plane)
		return;

	state = drm_output_state_get_existing_plane(output_state, plane);
	if (!state)
		return;

	/* No framebuffer: hide the cursor and clear its damage. */
	if (!state->fb) {
		pixman_region32_fini(&plane->base.damage);
		pixman_region32_init(&plane->base.damage);
		drmModeSetCursor(b->drm.fd, output->crtc_id, 0, 0, 0);
		return;
	}

	assert(state->fb == output->gbm_cursor_fb[output->current_cursor]);
	assert(!plane->state_cur->output || plane->state_cur->output == output);

	/* Only re-set the cursor image when the framebuffer changed;
	 * a pure move just needs drmModeMoveCursor below. */
	handle = output->gbm_cursor_handle[output->current_cursor];
	if (plane->state_cur->fb != state->fb) {
		if (drmModeSetCursor(b->drm.fd, output->crtc_id, handle,
				     b->cursor_width, b->cursor_height)) {
			weston_log("failed to set cursor: %s\n",
				   strerror(errno));
			goto err;
		}
	}

	pixman_region32_fini(&plane->base.damage);
	pixman_region32_init(&plane->base.damage);

	if (drmModeMoveCursor(b->drm.fd, output->crtc_id,
			      state->dest_x, state->dest_y)) {
		weston_log("failed to move cursor: %s\n", strerror(errno));
		goto err;
	}

	return;

err:
	/* Fall back to software cursors for the rest of the session. */
	b->cursors_are_broken = true;
	drmModeSetCursor(b->drm.fd, output->crtc_id, 0, 0, 0);
}
641
/* Apply an output state using the legacy (non-atomic) KMS API:
 * drmModeSetCrtc for modesets/DPMS, drmModePageFlip for buffer updates,
 * plus the legacy cursor and connector-DPMS calls.
 *
 * Returns 0 on success; on failure frees the state and returns -1. */
static int
drm_output_apply_state_legacy(struct drm_output_state *state)
{
	struct drm_output *output = state->output;
	struct drm_backend *backend = to_drm_backend(output->base.compositor);
	struct drm_plane *scanout_plane = output->scanout_plane;
	struct drm_property_info *dpms_prop;
	struct drm_plane_state *scanout_state;
	struct drm_mode *mode;
	struct drm_head *head;
	const struct pixel_format_info *pinfo = NULL;
	uint32_t connectors[MAX_CLONED_CONNECTORS];
	int n_conn = 0;
	struct timespec now;
	int ret = 0;

	/* Gather the connector IDs of every head cloned on this output. */
	wl_list_for_each(head, &output->base.head_list, base.output_link) {
		assert(n_conn < MAX_CLONED_CONNECTORS);
		connectors[n_conn++] = head->connector_id;
	}

	/* If disable_planes is set then assign_planes() wasn't
	 * called for this render, so we could still have a stale
	 * cursor plane set up.
	 */
	if (output->base.disable_planes) {
		output->cursor_view = NULL;
		if (output->cursor_plane) {
			output->cursor_plane->base.x = INT32_MIN;
			output->cursor_plane->base.y = INT32_MIN;
		}
	}

	/* DPMS off: hide the cursor, disable the CRTC, and complete the
	 * state synchronously (no kernel event will arrive). */
	if (state->dpms != WESTON_DPMS_ON) {
		if (output->cursor_plane) {
			ret = drmModeSetCursor(backend->drm.fd, output->crtc_id,
					       0, 0, 0);
			if (ret)
				weston_log("drmModeSetCursor failed disable: %s\n",
					   strerror(errno));
		}

		ret = drmModeSetCrtc(backend->drm.fd, output->crtc_id, 0, 0, 0,
				     NULL, 0, NULL);
		if (ret)
			weston_log("drmModeSetCrtc failed disabling: %s\n",
				   strerror(errno));

		drm_output_assign_state(state, DRM_STATE_APPLY_SYNC);
		weston_compositor_read_presentation_clock(output->base.compositor, &now);
		drm_output_update_complete(output,
		                           WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION,
		                           now.tv_sec, now.tv_nsec / 1000);

		return 0;
	}

	scanout_state =
		drm_output_state_get_existing_plane(state, scanout_plane);

	/* The legacy SetCrtc API doesn't allow us to do scaling, and the
	 * legacy PageFlip API doesn't allow us to do clipping either. */
	assert(scanout_state->src_x == 0);
	assert(scanout_state->src_y == 0);
	assert(scanout_state->src_w ==
		(unsigned) (output->base.current_mode->width << 16));
	assert(scanout_state->src_h ==
		(unsigned) (output->base.current_mode->height << 16));
	assert(scanout_state->dest_x == 0);
	assert(scanout_state->dest_y == 0);
	assert(scanout_state->dest_w == scanout_state->src_w >> 16);
	assert(scanout_state->dest_h == scanout_state->src_h >> 16);
	/* The legacy SetCrtc API doesn't support fences */
	assert(scanout_state->in_fence_fd == -1);

	/* Full modeset needed on first use, or when the new framebuffer's
	 * stride differs from the currently-scanned-out one. */
	mode = to_drm_mode(output->base.current_mode);
	if (backend->state_invalid ||
	    !scanout_plane->state_cur->fb ||
	    scanout_plane->state_cur->fb->strides[0] !=
	    scanout_state->fb->strides[0]) {

		ret = drmModeSetCrtc(backend->drm.fd, output->crtc_id,
				     scanout_state->fb->fb_id,
				     0, 0,
				     connectors, n_conn,
				     &mode->mode_info);
		if (ret) {
			weston_log("set mode failed: %s\n", strerror(errno));
			goto err;
		}
	}

	pinfo = scanout_state->fb->format;
	drm_debug(backend, "\t[CRTC:%u, PLANE:%u] FORMAT: %s\n",
		  output->crtc_id, scanout_state->plane->plane_id,
		  pinfo ? pinfo->drm_format_name : "UNKNOWN");

	/* Queue the flip; completion arrives as a DRM event. */
	if (drmModePageFlip(backend->drm.fd, output->crtc_id,
			    scanout_state->fb->fb_id,
			    DRM_MODE_PAGE_FLIP_EVENT, output) < 0) {
		weston_log("queueing pageflip failed: %s\n", strerror(errno));
		goto err;
	}

	assert(!output->page_flip_pending);

	if (output->pageflip_timer)
		wl_event_source_timer_update(output->pageflip_timer,
		                             backend->pageflip_timeout);

	drm_output_set_cursor(state);

	/* Propagate a DPMS change to every head's connector property. */
	if (state->dpms != output->state_cur->dpms) {
		wl_list_for_each(head, &output->base.head_list, base.output_link) {
			dpms_prop = &head->props_conn[WDRM_CONNECTOR_DPMS];
			if (dpms_prop->prop_id == 0)
				continue;

			ret = drmModeConnectorSetProperty(backend->drm.fd,
							  head->connector_id,
							  dpms_prop->prop_id,
							  state->dpms);
			if (ret) {
				weston_log("DRM: DPMS: failed property set for %s\n",
					   head->base.name);
			}
		}
	}

	drm_output_assign_state(state, DRM_STATE_APPLY_ASYNC);

	return 0;

err:
	output->cursor_view = NULL;
	drm_output_state_free(state);
	return -1;
}
780
Daniel Stone4c2fc702019-06-18 11:12:07 +0100781static int
782crtc_add_prop(drmModeAtomicReq *req, struct drm_output *output,
783 enum wdrm_crtc_property prop, uint64_t val)
784{
785 struct drm_property_info *info = &output->props_crtc[prop];
786 int ret;
787
788 if (info->prop_id == 0)
789 return -1;
790
791 ret = drmModeAtomicAddProperty(req, output->crtc_id, info->prop_id,
792 val);
793 drm_debug(output->backend, "\t\t\t[CRTC:%lu] %lu (%s) -> %llu (0x%llx)\n",
794 (unsigned long) output->crtc_id,
795 (unsigned long) info->prop_id, info->name,
796 (unsigned long long) val, (unsigned long long) val);
797 return (ret <= 0) ? -1 : 0;
798}
799
800static int
801connector_add_prop(drmModeAtomicReq *req, struct drm_head *head,
802 enum wdrm_connector_property prop, uint64_t val)
803{
804 struct drm_property_info *info = &head->props_conn[prop];
805 int ret;
806
807 if (info->prop_id == 0)
808 return -1;
809
810 ret = drmModeAtomicAddProperty(req, head->connector_id,
811 info->prop_id, val);
812 drm_debug(head->backend, "\t\t\t[CONN:%lu] %lu (%s) -> %llu (0x%llx)\n",
813 (unsigned long) head->connector_id,
814 (unsigned long) info->prop_id, info->name,
815 (unsigned long long) val, (unsigned long long) val);
816 return (ret <= 0) ? -1 : 0;
817}
818
819static int
820plane_add_prop(drmModeAtomicReq *req, struct drm_plane *plane,
821 enum wdrm_plane_property prop, uint64_t val)
822{
823 struct drm_property_info *info = &plane->props[prop];
824 int ret;
825
826 if (info->prop_id == 0)
827 return -1;
828
829 ret = drmModeAtomicAddProperty(req, plane->plane_id, info->prop_id,
830 val);
831 drm_debug(plane->backend, "\t\t\t[PLANE:%lu] %lu (%s) -> %llu (0x%llx)\n",
832 (unsigned long) plane->plane_id,
833 (unsigned long) info->prop_id, info->name,
834 (unsigned long long) val, (unsigned long long) val);
835 return (ret <= 0) ? -1 : 0;
836}
837
838
/* Attach the plane state's damage region to an atomic request as an
 * FB_DAMAGE_CLIPS blob, letting the kernel limit the plane update to the
 * damaged rectangles. Returns 0 on success or when there is nothing to
 * do, otherwise the drmModeCreatePropertyBlob/plane_add_prop error. */
static int
plane_add_damage(drmModeAtomicReq *req, struct drm_backend *backend,
		 struct drm_plane_state *plane_state)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_property_info *info =
		&plane->props[WDRM_PLANE_FB_DAMAGE_CLIPS];
	pixman_box32_t *rects;
	uint32_t blob_id;
	int n_rects;
	int ret;

	if (!pixman_region32_not_empty(&plane_state->damage))
		return 0;

	/*
	 * If a plane doesn't support fb damage blob property, kernel will
	 * perform full plane update.
	 */
	if (info->prop_id == 0)
		return 0;

	rects = pixman_region32_rectangles(&plane_state->damage, &n_rects);

	/* NOTE(review): blob_id is created every call but never passed to
	 * drmModeDestroyPropertyBlob anywhere visible here -- looks like a
	 * per-frame kernel blob leak; confirm whether it is destroyed after
	 * the commit elsewhere. */
	ret = drmModeCreatePropertyBlob(backend->drm.fd, rects,
					sizeof(*rects) * n_rects, &blob_id);
	if (ret != 0)
		return ret;

	ret = plane_add_prop(req, plane, WDRM_PLANE_FB_DAMAGE_CLIPS, blob_id);
	if (ret != 0)
		return ret;

	return 0;
}
874
Ankit Nautiyala344fe32019-05-14 18:36:08 +0530875static bool
876drm_head_has_prop(struct drm_head *head,
877 enum wdrm_connector_property prop)
878{
879 if (head && head->props_conn[prop].prop_id != 0)
880 return true;
881
882 return false;
883}
884
885/*
886 * This function converts the protection requests from weston_hdcp_protection
887 * corresponding drm values. These values can be set in "Content Protection"
888 * & "HDCP Content Type" connector properties.
889 */
890static void
891get_drm_protection_from_weston(enum weston_hdcp_protection weston_protection,
892 enum wdrm_content_protection_state *drm_protection,
893 enum wdrm_hdcp_content_type *drm_cp_type)
894{
895
896 switch (weston_protection) {
897 case WESTON_HDCP_DISABLE:
898 *drm_protection = WDRM_CONTENT_PROTECTION_UNDESIRED;
899 *drm_cp_type = WDRM_HDCP_CONTENT_TYPE0;
900 break;
901 case WESTON_HDCP_ENABLE_TYPE_0:
902 *drm_protection = WDRM_CONTENT_PROTECTION_DESIRED;
903 *drm_cp_type = WDRM_HDCP_CONTENT_TYPE0;
904 break;
905 case WESTON_HDCP_ENABLE_TYPE_1:
906 *drm_protection = WDRM_CONTENT_PROTECTION_DESIRED;
907 *drm_cp_type = WDRM_HDCP_CONTENT_TYPE1;
908 break;
909 default:
910 assert(0 && "bad weston_hdcp_protection");
911 }
912}
913
/* Add the "Content Protection" (and, when available, "HDCP Content Type")
 * connector properties to an atomic request, translating the requested
 * weston_hdcp_protection into the connector's runtime enum values.
 * Silently returns when the connector does not expose the needed
 * properties. */
static void
drm_head_set_hdcp_property(struct drm_head *head,
			   enum weston_hdcp_protection protection,
			   drmModeAtomicReq *req)
{
	int ret;
	enum wdrm_content_protection_state drm_protection;
	enum wdrm_hdcp_content_type drm_cp_type;
	struct drm_property_enum_info *enum_info;
	uint64_t prop_val;

	get_drm_protection_from_weston(protection, &drm_protection,
				       &drm_cp_type);

	if (!drm_head_has_prop(head, WDRM_CONNECTOR_CONTENT_PROTECTION))
		return;

	/*
	 * Content-type property is not exposed for platforms not supporting
	 * HDCP2.2, therefore, type-1 cannot be supported. The type-0 content
	 * still can be supported if the content-protection property is exposed.
	 */
	if (!drm_head_has_prop(head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE) &&
	    drm_cp_type != WDRM_HDCP_CONTENT_TYPE0)
		return;

	/* Translate our internal enum index into the kernel's runtime
	 * value cached by drm_property_info_populate(). */
	enum_info = head->props_conn[WDRM_CONNECTOR_CONTENT_PROTECTION].enum_values;
	prop_val = enum_info[drm_protection].value;
	ret = connector_add_prop(req, head, WDRM_CONNECTOR_CONTENT_PROTECTION,
				 prop_val);
	assert(ret == 0);

	if (!drm_head_has_prop(head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE))
		return;

	enum_info = head->props_conn[WDRM_CONNECTOR_HDCP_CONTENT_TYPE].enum_values;
	prop_val = enum_info[drm_cp_type].value;
	ret = connector_add_prop(req, head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE,
				 prop_val);
	assert(ret == 0);
}
955
/* Translate one output's pending state into atomic-request properties.
 *
 * Adds the CRTC mode/active properties, connector routing, HDCP
 * (content-protection) properties for every attached head, and the full
 * set of per-plane properties for each plane in the state's plane_list
 * to 'req'.  May OR DRM_MODE_ATOMIC_ALLOW_MODESET into *flags when the
 * DPMS state changes.  Returns 0 on success, non-zero if any property
 * could not be added. */
static int
drm_output_apply_state_atomic(struct drm_output_state *state,
			      drmModeAtomicReq *req,
			      uint32_t *flags)
{
	struct drm_output *output = state->output;
	struct drm_backend *b = to_drm_backend(output->base.compositor);
	struct drm_plane_state *plane_state;
	struct drm_mode *current_mode = to_drm_mode(output->base.current_mode);
	struct drm_head *head;
	int ret = 0;

	drm_debug(b, "\t\t[atomic] %s output %lu (%s) state\n",
		  (*flags & DRM_MODE_ATOMIC_TEST_ONLY) ? "testing" : "applying",
		  (unsigned long) output->base.id, output->base.name);

	/* A DPMS transition needs a full modeset, so ask the kernel to
	 * allow one. */
	if (state->dpms != output->state_cur->dpms) {
		drm_debug(b, "\t\t\t[atomic] DPMS state differs, modeset OK\n");
		*flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
	}

	if (state->dpms == WESTON_DPMS_ON) {
		/* Make sure the mode has a kernel blob ID we can point
		 * the CRTC at. */
		ret = drm_mode_ensure_blob(b, current_mode);
		if (ret != 0)
			return ret;

		ret |= crtc_add_prop(req, output, WDRM_CRTC_MODE_ID,
				     current_mode->blob_id);
		ret |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 1);

		/* No need for the DPMS property, since it is implicit in
		 * routing and CRTC activity. */
		wl_list_for_each(head, &output->base.head_list, base.output_link) {
			ret |= connector_add_prop(req, head, WDRM_CONNECTOR_CRTC_ID,
						  output->crtc_id);
		}
	} else {
		/* Anything other than DPMS_ON: deactivate the CRTC and
		 * detach all connectors from it. */
		ret |= crtc_add_prop(req, output, WDRM_CRTC_MODE_ID, 0);
		ret |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 0);

		/* No need for the DPMS property, since it is implicit in
		 * routing and CRTC activity. */
		wl_list_for_each(head, &output->base.head_list, base.output_link)
			ret |= connector_add_prop(req, head, WDRM_CONNECTOR_CRTC_ID, 0);
	}

	/* HDCP properties are per-connector, applied for each head. */
	wl_list_for_each(head, &output->base.head_list, base.output_link)
		drm_head_set_hdcp_property(head, state->protection, req);

	if (ret != 0) {
		weston_log("couldn't set atomic CRTC/connector state\n");
		return ret;
	}

	wl_list_for_each(plane_state, &state->plane_list, link) {
		struct drm_plane *plane = plane_state->plane;
		const struct pixel_format_info *pinfo = NULL;

		/* A NULL fb means the plane is being disabled: FB_ID and
		 * CRTC_ID both go to 0. */
		ret |= plane_add_prop(req, plane, WDRM_PLANE_FB_ID,
				      plane_state->fb ? plane_state->fb->fb_id : 0);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID,
				      plane_state->fb ? output->crtc_id : 0);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_X,
				      plane_state->src_x);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_Y,
				      plane_state->src_y);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_W,
				      plane_state->src_w);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_H,
				      plane_state->src_h);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_X,
				      plane_state->dest_x);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_Y,
				      plane_state->dest_y);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_W,
				      plane_state->dest_w);
		ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_H,
				      plane_state->dest_h);
		ret |= plane_add_damage(req, b, plane_state);

		if (plane_state->fb && plane_state->fb->format)
			pinfo = plane_state->fb->format;

		drm_debug(plane->backend, "\t\t\t[PLANE:%lu] FORMAT: %s\n",
			  (unsigned long) plane->plane_id,
			  pinfo ? pinfo->drm_format_name : "UNKNOWN");

		/* Explicit-synchronization fence to wait on before the
		 * kernel reads the buffer. */
		if (plane_state->in_fence_fd >= 0) {
			ret |= plane_add_prop(req, plane,
					      WDRM_PLANE_IN_FENCE_FD,
					      plane_state->in_fence_fd);
		}

		/* do note, that 'invented' zpos values are set as immutable */
		if (plane_state->zpos != DRM_PLANE_ZPOS_INVALID_PLANE &&
		    plane_state->plane->zpos_min != plane_state->plane->zpos_max)
			ret |= plane_add_prop(req, plane,
					      WDRM_PLANE_ZPOS,
					      plane_state->zpos);

		if (ret != 0) {
			weston_log("couldn't set plane state\n");
			return ret;
		}
	}

	return 0;
}
1064
1065/**
1066 * Helper function used only by drm_pending_state_apply, with the same
1067 * guarantees and constraints as that function.
1068 */
1069static int
1070drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
1071 enum drm_state_apply_mode mode)
1072{
1073 struct drm_backend *b = pending_state->backend;
1074 struct drm_output_state *output_state, *tmp;
1075 struct drm_plane *plane;
1076 drmModeAtomicReq *req = drmModeAtomicAlloc();
1077 uint32_t flags;
1078 int ret = 0;
1079
1080 if (!req)
1081 return -1;
1082
1083 switch (mode) {
1084 case DRM_STATE_APPLY_SYNC:
1085 flags = 0;
1086 break;
1087 case DRM_STATE_APPLY_ASYNC:
1088 flags = DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK;
1089 break;
1090 case DRM_STATE_TEST_ONLY:
1091 flags = DRM_MODE_ATOMIC_TEST_ONLY;
1092 break;
1093 }
1094
1095 if (b->state_invalid) {
1096 struct weston_head *head_base;
1097 struct drm_head *head;
1098 uint32_t *unused;
1099 int err;
1100
1101 drm_debug(b, "\t\t[atomic] previous state invalid; "
1102 "starting with fresh state\n");
1103
1104 /* If we need to reset all our state (e.g. because we've
1105 * just started, or just been VT-switched in), explicitly
1106 * disable all the CRTCs and connectors we aren't using. */
1107 wl_list_for_each(head_base,
1108 &b->compositor->head_list, compositor_link) {
1109 struct drm_property_info *info;
1110
1111 if (weston_head_is_enabled(head_base))
1112 continue;
1113
1114 head = to_drm_head(head_base);
1115
1116 drm_debug(b, "\t\t[atomic] disabling inactive head %s\n",
1117 head_base->name);
1118
1119 info = &head->props_conn[WDRM_CONNECTOR_CRTC_ID];
1120 err = drmModeAtomicAddProperty(req, head->connector_id,
1121 info->prop_id, 0);
1122 drm_debug(b, "\t\t\t[CONN:%lu] %lu (%s) -> 0\n",
1123 (unsigned long) head->connector_id,
1124 (unsigned long) info->prop_id,
1125 info->name);
1126 if (err <= 0)
1127 ret = -1;
1128 }
1129
1130 wl_array_for_each(unused, &b->unused_crtcs) {
1131 struct drm_property_info infos[WDRM_CRTC__COUNT];
1132 struct drm_property_info *info;
1133 drmModeObjectProperties *props;
1134 uint64_t active;
1135
1136 memset(infos, 0, sizeof(infos));
1137
1138 /* We can't emit a disable on a CRTC that's already
1139 * off, as the kernel will refuse to generate an event
1140 * for an off->off state and fail the commit.
1141 */
1142 props = drmModeObjectGetProperties(b->drm.fd,
1143 *unused,
1144 DRM_MODE_OBJECT_CRTC);
1145 if (!props) {
1146 ret = -1;
1147 continue;
1148 }
1149
1150 drm_property_info_populate(b, crtc_props, infos,
1151 WDRM_CRTC__COUNT,
1152 props);
1153
1154 info = &infos[WDRM_CRTC_ACTIVE];
1155 active = drm_property_get_value(info, props, 0);
1156 drmModeFreeObjectProperties(props);
1157 if (active == 0) {
1158 drm_property_info_free(infos, WDRM_CRTC__COUNT);
1159 continue;
1160 }
1161
1162 drm_debug(b, "\t\t[atomic] disabling unused CRTC %lu\n",
1163 (unsigned long) *unused);
1164
1165 drm_debug(b, "\t\t\t[CRTC:%lu] %lu (%s) -> 0\n",
1166 (unsigned long) *unused,
1167 (unsigned long) info->prop_id, info->name);
1168 err = drmModeAtomicAddProperty(req, *unused,
1169 info->prop_id, 0);
1170 if (err <= 0)
1171 ret = -1;
1172
1173 info = &infos[WDRM_CRTC_MODE_ID];
1174 drm_debug(b, "\t\t\t[CRTC:%lu] %lu (%s) -> 0\n",
1175 (unsigned long) *unused,
1176 (unsigned long) info->prop_id, info->name);
1177 err = drmModeAtomicAddProperty(req, *unused,
1178 info->prop_id, 0);
1179 if (err <= 0)
1180 ret = -1;
1181
1182 drm_property_info_free(infos, WDRM_CRTC__COUNT);
1183 }
1184
1185 /* Disable all the planes; planes which are being used will
1186 * override this state in the output-state application. */
1187 wl_list_for_each(plane, &b->plane_list, link) {
1188 drm_debug(b, "\t\t[atomic] starting with plane %lu disabled\n",
1189 (unsigned long) plane->plane_id);
1190 plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID, 0);
1191 plane_add_prop(req, plane, WDRM_PLANE_FB_ID, 0);
1192 }
1193
1194 flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
1195 }
1196
1197 wl_list_for_each(output_state, &pending_state->output_list, link) {
1198 if (output_state->output->virtual)
1199 continue;
1200 if (mode == DRM_STATE_APPLY_SYNC)
1201 assert(output_state->dpms == WESTON_DPMS_OFF);
1202 ret |= drm_output_apply_state_atomic(output_state, req, &flags);
1203 }
1204
1205 if (ret != 0) {
1206 weston_log("atomic: couldn't compile atomic state\n");
1207 goto out;
1208 }
1209
1210 ret = drmModeAtomicCommit(b->drm.fd, req, flags, b);
1211 drm_debug(b, "[atomic] drmModeAtomicCommit\n");
1212
1213 /* Test commits do not take ownership of the state; return
1214 * without freeing here. */
1215 if (mode == DRM_STATE_TEST_ONLY) {
1216 drmModeAtomicFree(req);
1217 return ret;
1218 }
1219
1220 if (ret != 0) {
1221 weston_log("atomic: couldn't commit new state: %s\n",
1222 strerror(errno));
1223 goto out;
1224 }
1225
1226 wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
1227 link)
1228 drm_output_assign_state(output_state, mode);
1229
1230 b->state_invalid = false;
1231
1232 assert(wl_list_empty(&pending_state->output_list));
1233
1234out:
1235 drmModeAtomicFree(req);
1236 drm_pending_state_free(pending_state);
1237 return ret;
1238}
Daniel Stone4c2fc702019-06-18 11:12:07 +01001239
1240/**
1241 * Tests a pending state, to see if the kernel will accept the update as
1242 * constructed.
1243 *
1244 * Using atomic modesetting, the kernel performs the same checks as it would
1245 * on a real commit, returning success or failure without actually modifying
1246 * the running state. It does not return -EBUSY if there are pending updates
1247 * in flight, so states may be tested at any point, however this means a
1248 * state which passed testing may fail on a real commit if the timing is not
1249 * respected (e.g. committing before the previous commit has completed).
1250 *
1251 * Without atomic modesetting, we have no way to check, so we optimistically
1252 * claim it will work.
1253 *
1254 * Unlike drm_pending_state_apply() and drm_pending_state_apply_sync(), this
1255 * function does _not_ take ownership of pending_state, nor does it clear
1256 * state_invalid.
1257 */
1258int
1259drm_pending_state_test(struct drm_pending_state *pending_state)
1260{
Daniel Stone4c2fc702019-06-18 11:12:07 +01001261 struct drm_backend *b = pending_state->backend;
1262
1263 if (b->atomic_modeset)
1264 return drm_pending_state_apply_atomic(pending_state,
1265 DRM_STATE_TEST_ONLY);
Daniel Stone4c2fc702019-06-18 11:12:07 +01001266
1267 /* We have no way to test state before application on the legacy
1268 * modesetting API, so just claim it succeeded. */
1269 return 0;
1270}
1271
1272/**
1273 * Applies all of a pending_state asynchronously: the primary entry point for
1274 * applying KMS state to a device. Updates the state for all outputs in the
1275 * pending_state, as well as disabling any unclaimed outputs.
1276 *
1277 * Unconditionally takes ownership of pending_state, and clears state_invalid.
1278 */
1279int
1280drm_pending_state_apply(struct drm_pending_state *pending_state)
1281{
1282 struct drm_backend *b = pending_state->backend;
1283 struct drm_output_state *output_state, *tmp;
1284 uint32_t *unused;
1285
Daniel Stone4c2fc702019-06-18 11:12:07 +01001286 if (b->atomic_modeset)
1287 return drm_pending_state_apply_atomic(pending_state,
1288 DRM_STATE_APPLY_ASYNC);
Daniel Stone4c2fc702019-06-18 11:12:07 +01001289
1290 if (b->state_invalid) {
1291 /* If we need to reset all our state (e.g. because we've
1292 * just started, or just been VT-switched in), explicitly
1293 * disable all the CRTCs we aren't using. This also disables
1294 * all connectors on these CRTCs, so we don't need to do that
1295 * separately with the pre-atomic API. */
1296 wl_array_for_each(unused, &b->unused_crtcs)
1297 drmModeSetCrtc(b->drm.fd, *unused, 0, 0, 0, NULL, 0,
1298 NULL);
1299 }
1300
1301 wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
1302 link) {
1303 struct drm_output *output = output_state->output;
1304 int ret;
1305
1306 if (output->virtual) {
1307 drm_output_assign_state(output_state,
1308 DRM_STATE_APPLY_ASYNC);
1309 continue;
1310 }
1311
1312 ret = drm_output_apply_state_legacy(output_state);
1313 if (ret != 0) {
1314 weston_log("Couldn't apply state for output %s\n",
1315 output->base.name);
1316 }
1317 }
1318
1319 b->state_invalid = false;
1320
1321 assert(wl_list_empty(&pending_state->output_list));
1322
1323 drm_pending_state_free(pending_state);
1324
1325 return 0;
1326}
1327
1328/**
1329 * The synchronous version of drm_pending_state_apply. May only be used to
1330 * disable outputs. Does so synchronously: the request is guaranteed to have
1331 * completed on return, and the output will not be touched afterwards.
1332 *
1333 * Unconditionally takes ownership of pending_state, and clears state_invalid.
1334 */
1335int
1336drm_pending_state_apply_sync(struct drm_pending_state *pending_state)
1337{
1338 struct drm_backend *b = pending_state->backend;
1339 struct drm_output_state *output_state, *tmp;
1340 uint32_t *unused;
1341
Daniel Stone4c2fc702019-06-18 11:12:07 +01001342 if (b->atomic_modeset)
1343 return drm_pending_state_apply_atomic(pending_state,
1344 DRM_STATE_APPLY_SYNC);
Daniel Stone4c2fc702019-06-18 11:12:07 +01001345
1346 if (b->state_invalid) {
1347 /* If we need to reset all our state (e.g. because we've
1348 * just started, or just been VT-switched in), explicitly
1349 * disable all the CRTCs we aren't using. This also disables
1350 * all connectors on these CRTCs, so we don't need to do that
1351 * separately with the pre-atomic API. */
1352 wl_array_for_each(unused, &b->unused_crtcs)
1353 drmModeSetCrtc(b->drm.fd, *unused, 0, 0, 0, NULL, 0,
1354 NULL);
1355 }
1356
1357 wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
1358 link) {
1359 int ret;
1360
1361 assert(output_state->dpms == WESTON_DPMS_OFF);
1362 ret = drm_output_apply_state_legacy(output_state);
1363 if (ret != 0) {
1364 weston_log("Couldn't apply state for output %s\n",
1365 output_state->output->base.name);
1366 }
1367 }
1368
1369 b->state_invalid = false;
1370
1371 assert(wl_list_empty(&pending_state->output_list));
1372
1373 drm_pending_state_free(pending_state);
1374
1375 return 0;
1376}
1377
1378void
1379drm_output_update_msc(struct drm_output *output, unsigned int seq)
1380{
1381 uint64_t msc_hi = output->base.msc >> 32;
1382
1383 if (seq < (output->base.msc & 0xffffffff))
1384 msc_hi++;
1385
1386 output->base.msc = (msc_hi << 32) + seq;
1387}
1388
1389static void
1390page_flip_handler(int fd, unsigned int frame,
1391 unsigned int sec, unsigned int usec, void *data)
1392{
1393 struct drm_output *output = data;
1394 struct drm_backend *b = to_drm_backend(output->base.compositor);
1395 uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC |
1396 WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION |
1397 WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK;
1398
1399 drm_output_update_msc(output, frame);
1400
1401 assert(!b->atomic_modeset);
1402 assert(output->page_flip_pending);
Emmanuel Gil Peyrot1b3ad092019-12-09 02:50:55 +01001403 output->page_flip_pending = false;
Daniel Stone4c2fc702019-06-18 11:12:07 +01001404
1405 drm_output_update_complete(output, flags, sec, usec);
1406}
1407
Daniel Stone4c2fc702019-06-18 11:12:07 +01001408static void
1409atomic_flip_handler(int fd, unsigned int frame, unsigned int sec,
1410 unsigned int usec, unsigned int crtc_id, void *data)
1411{
1412 struct drm_backend *b = data;
1413 struct drm_output *output = drm_output_find_by_crtc(b, crtc_id);
1414 uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC |
1415 WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION |
1416 WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK;
1417
1418 /* During the initial modeset, we can disable CRTCs which we don't
1419 * actually handle during normal operation; this will give us events
1420 * for unknown outputs. Ignore them. */
1421 if (!output || !output->base.enabled)
1422 return;
1423
1424 drm_output_update_msc(output, frame);
1425
1426 drm_debug(b, "[atomic][CRTC:%u] flip processing started\n", crtc_id);
1427 assert(b->atomic_modeset);
1428 assert(output->atomic_complete_pending);
Emmanuel Gil Peyrot1b3ad092019-12-09 02:50:55 +01001429 output->atomic_complete_pending = false;
Daniel Stone4c2fc702019-06-18 11:12:07 +01001430
1431 drm_output_update_complete(output, flags, sec, usec);
1432 drm_debug(b, "[atomic][CRTC:%u] flip processing completed\n", crtc_id);
1433}
Daniel Stone4c2fc702019-06-18 11:12:07 +01001434
1435int
1436on_drm_input(int fd, uint32_t mask, void *data)
1437{
Daniel Stone4c2fc702019-06-18 11:12:07 +01001438 struct drm_backend *b = data;
Daniel Stone4c2fc702019-06-18 11:12:07 +01001439 drmEventContext evctx;
1440
1441 memset(&evctx, 0, sizeof evctx);
Daniel Stone4c2fc702019-06-18 11:12:07 +01001442 evctx.version = 3;
1443 if (b->atomic_modeset)
1444 evctx.page_flip_handler2 = atomic_flip_handler;
1445 else
Daniel Stone4c2fc702019-06-18 11:12:07 +01001446 evctx.page_flip_handler = page_flip_handler;
1447 drmHandleEvent(fd, &evctx);
1448
1449 return 1;
1450}
1451
/* Probe and record the KMS capabilities of the opened DRM device:
 * presentation clock, cursor dimensions, universal planes, atomic
 * modesetting, framebuffer modifiers and aspect-ratio support.
 * Returns 0 on success, -1 if the presentation clock cannot be set. */
int
init_kms_caps(struct drm_backend *b)
{
	uint64_t cap;
	int ret;
	clockid_t clk_id;

	weston_log("using %s\n", b->drm.filename);

	/* Prefer the monotonic clock for presentation timestamps when the
	 * device delivers monotonic vblank timestamps. */
	ret = drmGetCap(b->drm.fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap);
	if (ret == 0 && cap == 1)
		clk_id = CLOCK_MONOTONIC;
	else
		clk_id = CLOCK_REALTIME;

	if (weston_compositor_set_presentation_clock(b->compositor, clk_id) < 0) {
		weston_log("Error: failed to set presentation clock %d.\n",
			   clk_id);
		return -1;
	}

	/* Cursor plane dimensions; fall back to the traditional 64x64 if
	 * the kernel does not report them. */
	ret = drmGetCap(b->drm.fd, DRM_CAP_CURSOR_WIDTH, &cap);
	if (ret == 0)
		b->cursor_width = cap;
	else
		b->cursor_width = 64;

	ret = drmGetCap(b->drm.fd, DRM_CAP_CURSOR_HEIGHT, &cap);
	if (ret == 0)
		b->cursor_height = cap;
	else
		b->cursor_height = 64;

	/* Universal planes expose primary/cursor planes as regular KMS
	 * planes; can be disabled for debugging via the environment. */
	if (!getenv("WESTON_DISABLE_UNIVERSAL_PLANES")) {
		ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
		b->universal_planes = (ret == 0);
	}
	weston_log("DRM: %s universal planes\n",
		   b->universal_planes ? "supports" : "does not support");

	/* Atomic requires universal planes, plus per-CRTC vblank events
	 * so atomic_flip_handler can identify the CRTC. */
	if (b->universal_planes && !getenv("WESTON_DISABLE_ATOMIC")) {
		ret = drmGetCap(b->drm.fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &cap);
		if (ret != 0)
			cap = 0;
		ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_ATOMIC, 1);
		b->atomic_modeset = ((ret == 0) && (cap == 1));
	}
	weston_log("DRM: %s atomic modesetting\n",
		   b->atomic_modeset ? "supports" : "does not support");

	/* Whether drmModeAddFB2WithModifiers is usable on this device. */
	ret = drmGetCap(b->drm.fd, DRM_CAP_ADDFB2_MODIFIERS, &cap);
	if (ret == 0)
		b->fb_modifiers = cap;
	else
		b->fb_modifiers = 0;

	/*
	 * KMS support for hardware planes cannot properly synchronize
	 * without nuclear page flip. Without nuclear/atomic, hw plane
	 * and cursor plane updates would either tear or cause extra
	 * waits for vblanks which means dropping the compositor framerate
	 * to a fraction. For cursors, it's not so bad, so they are
	 * enabled.
	 */
	if (!b->atomic_modeset || getenv("WESTON_FORCE_RENDERER"))
		b->sprites_are_broken = true;

	ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_ASPECT_RATIO, 1);
	b->aspect_ratio_supported = (ret == 0);
	weston_log("DRM: %s picture aspect ratio\n",
		   b->aspect_ratio_supported ? "supports" : "does not support");

	return 0;
}