1/*
2 * Copyright © 2008-2011 Kristian Høgsberg
3 * Copyright © 2011 Intel Corporation
4 * Copyright © 2017, 2018 Collabora, Ltd.
5 * Copyright © 2017, 2018 General Electric Company
6 * Copyright (c) 2018 DisplayLink (UK) Ltd.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining
9 * a copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sublicense, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial
18 * portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 * SOFTWARE.
28 */
29
30#include "config.h"
31
32#include <stdint.h>
33
34#include <xf86drm.h>
35#include <xf86drmMode.h>
36#include <drm_fourcc.h>
37
38#include <libweston/libweston.h>
39#include <libweston/backend-drm.h>
40#include "shared/helpers.h"
41#include "drm-internal.h"
42#include "pixel-formats.h"
43#include "presentation-time-server-protocol.h"
44
45#ifndef DRM_FORMAT_MOD_LINEAR
46#define DRM_FORMAT_MOD_LINEAR 0
47#endif
48
49struct drm_property_enum_info plane_type_enums[] = {
50 [WDRM_PLANE_TYPE_PRIMARY] = {
51 .name = "Primary",
52 },
53 [WDRM_PLANE_TYPE_OVERLAY] = {
54 .name = "Overlay",
55 },
56 [WDRM_PLANE_TYPE_CURSOR] = {
57 .name = "Cursor",
58 },
59};
60
61const struct drm_property_info plane_props[] = {
62 [WDRM_PLANE_TYPE] = {
63 .name = "type",
64 .enum_values = plane_type_enums,
65 .num_enum_values = WDRM_PLANE_TYPE__COUNT,
66 },
67 [WDRM_PLANE_SRC_X] = { .name = "SRC_X", },
68 [WDRM_PLANE_SRC_Y] = { .name = "SRC_Y", },
69 [WDRM_PLANE_SRC_W] = { .name = "SRC_W", },
70 [WDRM_PLANE_SRC_H] = { .name = "SRC_H", },
71 [WDRM_PLANE_CRTC_X] = { .name = "CRTC_X", },
72 [WDRM_PLANE_CRTC_Y] = { .name = "CRTC_Y", },
73 [WDRM_PLANE_CRTC_W] = { .name = "CRTC_W", },
74 [WDRM_PLANE_CRTC_H] = { .name = "CRTC_H", },
75 [WDRM_PLANE_FB_ID] = { .name = "FB_ID", },
76 [WDRM_PLANE_CRTC_ID] = { .name = "CRTC_ID", },
77 [WDRM_PLANE_IN_FORMATS] = { .name = "IN_FORMATS" },
78 [WDRM_PLANE_IN_FENCE_FD] = { .name = "IN_FENCE_FD" },
79 [WDRM_PLANE_FB_DAMAGE_CLIPS] = { .name = "FB_DAMAGE_CLIPS" },
80 [WDRM_PLANE_ZPOS] = { .name = "zpos" },
81};
82
83struct drm_property_enum_info dpms_state_enums[] = {
84 [WDRM_DPMS_STATE_OFF] = {
85 .name = "Off",
86 },
87 [WDRM_DPMS_STATE_ON] = {
88 .name = "On",
89 },
90 [WDRM_DPMS_STATE_STANDBY] = {
91 .name = "Standby",
92 },
93 [WDRM_DPMS_STATE_SUSPEND] = {
94 .name = "Suspend",
95 },
96};
97
98struct drm_property_enum_info content_protection_enums[] = {
99 [WDRM_CONTENT_PROTECTION_UNDESIRED] = {
100 .name = "Undesired",
101 },
102 [WDRM_CONTENT_PROTECTION_DESIRED] = {
103 .name = "Desired",
104 },
105 [WDRM_CONTENT_PROTECTION_ENABLED] = {
106 .name = "Enabled",
107 },
108};
109
110struct drm_property_enum_info hdcp_content_type_enums[] = {
111 [WDRM_HDCP_CONTENT_TYPE0] = {
112 .name = "HDCP Type0",
113 },
114 [WDRM_HDCP_CONTENT_TYPE1] = {
115 .name = "HDCP Type1",
116 },
117};
118
119struct drm_property_enum_info panel_orientation_enums[] = {
120 [WDRM_PANEL_ORIENTATION_NORMAL] = { .name = "Normal", },
121 [WDRM_PANEL_ORIENTATION_UPSIDE_DOWN] = { .name = "Upside Down", },
122 [WDRM_PANEL_ORIENTATION_LEFT_SIDE_UP] = { .name = "Left Side Up", },
123 [WDRM_PANEL_ORIENTATION_RIGHT_SIDE_UP] = { .name = "Right Side Up", },
124};
125
126const struct drm_property_info connector_props[] = {
127 [WDRM_CONNECTOR_EDID] = { .name = "EDID" },
128 [WDRM_CONNECTOR_DPMS] = {
129 .name = "DPMS",
130 .enum_values = dpms_state_enums,
131 .num_enum_values = WDRM_DPMS_STATE__COUNT,
132 },
133 [WDRM_CONNECTOR_CRTC_ID] = { .name = "CRTC_ID", },
134 [WDRM_CONNECTOR_NON_DESKTOP] = { .name = "non-desktop", },
135 [WDRM_CONNECTOR_CONTENT_PROTECTION] = {
136 .name = "Content Protection",
137 .enum_values = content_protection_enums,
138 .num_enum_values = WDRM_CONTENT_PROTECTION__COUNT,
139 },
140 [WDRM_CONNECTOR_HDCP_CONTENT_TYPE] = {
141 .name = "HDCP Content Type",
142 .enum_values = hdcp_content_type_enums,
143 .num_enum_values = WDRM_HDCP_CONTENT_TYPE__COUNT,
144 },
145 [WDRM_CONNECTOR_PANEL_ORIENTATION] = {
146 .name = "panel orientation",
147 .enum_values = panel_orientation_enums,
148 .num_enum_values = WDRM_PANEL_ORIENTATION__COUNT,
149 },
150};
151
152const struct drm_property_info crtc_props[] = {
153 [WDRM_CRTC_MODE_ID] = { .name = "MODE_ID", },
154 [WDRM_CRTC_ACTIVE] = { .name = "ACTIVE", },
155};
156
157
158/**
159 * Mode for drm_pending_state_apply and co.
160 */
161enum drm_state_apply_mode {
162 DRM_STATE_APPLY_SYNC, /**< state fully processed */
163 DRM_STATE_APPLY_ASYNC, /**< state pending event delivery */
164 DRM_STATE_TEST_ONLY, /**< test if the state can be applied */
165};
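/*
 * Editor's note: within this file, drm_pending_state_apply() applies state
 * with DRM_STATE_APPLY_ASYNC, drm_pending_state_apply_sync() with
 * DRM_STATE_APPLY_SYNC, and drm_pending_state_test() with
 * DRM_STATE_TEST_ONLY (the latter only on the atomic path).
 */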
166
167/**
168 * Get the current value of a KMS property
169 *
170 * Given a drmModeObjectGetProperties return, as well as the drm_property_info
171 * for the target property, return the current value of that property,
172 * with an optional default. If the property is a KMS enum type, the return
173 * value will be translated into the appropriate internal enum.
174 *
175 * If the property is not present, the default value will be returned.
176 *
177 * @param info Internal structure for property to look up
178 * @param props Raw KMS properties for the target object
179 * @param def Value to return if property is not found
180 */
181uint64_t
182drm_property_get_value(struct drm_property_info *info,
183 const drmModeObjectProperties *props,
184 uint64_t def)
185{
186 unsigned int i;
187
188 if (info->prop_id == 0)
189 return def;
190
191 for (i = 0; i < props->count_props; i++) {
192 unsigned int j;
193
194 if (props->props[i] != info->prop_id)
195 continue;
196
197 /* Simple (non-enum) types can return the value directly */
198 if (info->num_enum_values == 0)
199 return props->prop_values[i];
200
201 /* Map from raw value to enum value */
202 for (j = 0; j < info->num_enum_values; j++) {
203 if (!info->enum_values[j].valid)
204 continue;
205 if (info->enum_values[j].value != props->prop_values[i])
206 continue;
207
208 return j;
209 }
210
211 /* We don't have a mapping for this enum; return default. */
212 break;
213 }
214
215 return def;
216}
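/*
 * Editor's sketch (not part of the original code): reading the cached
 * "type" enum for a plane, assuming `plane` is a struct drm_plane and
 * `props` was returned by drmModeObjectGetProperties() for that plane,
 * with overlay chosen here as the arbitrary default:
 *
 *	uint64_t type =
 *		drm_property_get_value(&plane->props[WDRM_PLANE_TYPE],
 *				       props, WDRM_PLANE_TYPE_OVERLAY);
 *	bool is_cursor = (type == WDRM_PLANE_TYPE_CURSOR);
 */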
217
218/**
219 * Get the current range values of a KMS property
220 *
221 * Given a drmModeObjectGetProperties return, as well as the drm_property_info
222 * for the target property, return the current range values of that property.
223 *
224 * If the property is not present, or it is not a range property, then
225 * NULL will be returned.
226 *
227 * @param info Internal structure for property to look up
228 * @param props Raw KMS properties for the target object
229 */
230uint64_t *
231drm_property_get_range_values(struct drm_property_info *info,
232 const drmModeObjectProperties *props)
233{
234 unsigned int i;
235
236 if (info->prop_id == 0)
237 return NULL;
238
239 for (i = 0; i < props->count_props; i++) {
240
241 if (props->props[i] != info->prop_id)
242 continue;
243
244 if (!(info->flags & DRM_MODE_PROP_RANGE) &&
245 !(info->flags & DRM_MODE_PROP_SIGNED_RANGE))
246 continue;
247
248 return info->range_values;
249 }
250
251 return NULL;
252}
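/*
 * Editor's sketch: for a range property such as "zpos", the two reported
 * values are assumed here to be (minimum, maximum), e.g. with `plane` and
 * `props` as above:
 *
 *	uint64_t *range =
 *		drm_property_get_range_values(&plane->props[WDRM_PLANE_ZPOS],
 *					      props);
 *	if (range) {
 *		plane->zpos_min = range[0];
 *		plane->zpos_max = range[1];
 *	}
 */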
253
254/**
255 * Cache DRM property values
256 *
257 * Update a per-object array of drm_property_info structures, given the
258 * DRM properties of the object.
259 *
260 * Call this the first time an object appears (note that only connectors
261 * can be hotplugged), or when its status changes in a way which
262 * invalidates the cached property values (currently, the only such case
263 * is connector hotplug).
264 *
265 * This updates the property IDs and enum values within the drm_property_info
266 * array.
267 *
268 * DRM property enum values are dynamic at runtime; the user must query the
269 * property to find out the desired runtime value for a requested string
270 * name. Using the 'type' field on planes as an example, there is no single
271 * hardcoded constant for primary plane types; instead, the property must be
272 * queried at runtime to find the value associated with the string "Primary".
273 *
274 * This helper queries and caches the enum values, to allow us to use a set
275 * of compile-time-constant enums portably across various implementations.
276 * The values given in enum_names are searched for, and stored in the
277 * same-indexed field of the map array.
278 *
279 * @param b DRM backend object
280 * @param src DRM property info array to source from
281 * @param info DRM property info array to copy into
282 * @param num_infos Number of entries in the source array
283 * @param props DRM object properties for the object
284 */
285void
286drm_property_info_populate(struct drm_backend *b,
287 const struct drm_property_info *src,
288 struct drm_property_info *info,
289 unsigned int num_infos,
290 drmModeObjectProperties *props)
291{
292 drmModePropertyRes *prop;
293 unsigned i, j;
294
295 for (i = 0; i < num_infos; i++) {
296 unsigned int j;
297
298 info[i].name = src[i].name;
299 info[i].prop_id = 0;
300 info[i].num_enum_values = src[i].num_enum_values;
301
302 if (src[i].num_enum_values == 0)
303 continue;
304
305 info[i].enum_values =
306 malloc(src[i].num_enum_values *
307 sizeof(*info[i].enum_values));
308 assert(info[i].enum_values);
309 for (j = 0; j < info[i].num_enum_values; j++) {
310 info[i].enum_values[j].name = src[i].enum_values[j].name;
311 info[i].enum_values[j].valid = false;
312 }
313 }
314
315 for (i = 0; i < props->count_props; i++) {
316 unsigned int k;
317
318 prop = drmModeGetProperty(b->drm.fd, props->props[i]);
319 if (!prop)
320 continue;
321
322 for (j = 0; j < num_infos; j++) {
323 if (!strcmp(prop->name, info[j].name))
324 break;
325 }
326
327 /* We don't know/care about this property. */
328 if (j == num_infos) {
329#ifdef DEBUG
330 weston_log("DRM debug: unrecognized property %u '%s'\n",
331 prop->prop_id, prop->name);
332#endif
333 drmModeFreeProperty(prop);
334 continue;
335 }
336
337 if (info[j].num_enum_values == 0 &&
338 (prop->flags & DRM_MODE_PROP_ENUM)) {
339 weston_log("DRM: expected property %s to not be an"
340 " enum, but it is; ignoring\n", prop->name);
341 drmModeFreeProperty(prop);
342 continue;
343 }
344
345 info[j].prop_id = props->props[i];
346 info[j].flags = prop->flags;
347
348 if (prop->flags & DRM_MODE_PROP_RANGE ||
349 prop->flags & DRM_MODE_PROP_SIGNED_RANGE) {
350 info[j].num_range_values = prop->count_values;
351 for (int v = 0; v < prop->count_values; v++)
352 info[j].range_values[v] = prop->values[v];
353 }
354
355
356 if (info[j].num_enum_values == 0) {
357 drmModeFreeProperty(prop);
358 continue;
359 }
360
361 if (!(prop->flags & DRM_MODE_PROP_ENUM)) {
362 weston_log("DRM: expected property %s to be an enum,"
363 " but it is not; ignoring\n", prop->name);
364 drmModeFreeProperty(prop);
365 info[j].prop_id = 0;
366 continue;
367 }
368
369 for (k = 0; k < info[j].num_enum_values; k++) {
370 int l;
371
372 for (l = 0; l < prop->count_enums; l++) {
373 if (!strcmp(prop->enums[l].name,
374 info[j].enum_values[k].name))
375 break;
376 }
377
378 if (l == prop->count_enums)
379 continue;
380
381 info[j].enum_values[k].valid = true;
382 info[j].enum_values[k].value = prop->enums[l].value;
383 }
384
385 drmModeFreeProperty(prop);
386 }
387
388#ifdef DEBUG
389 for (i = 0; i < num_infos; i++) {
390 if (info[i].prop_id == 0)
391 weston_log("DRM warning: property '%s' missing\n",
392 info[i].name);
393 }
394#endif
395}
396
397/**
398 * Free DRM property information
399 *
400 * Frees all memory associated with a DRM property info array and zeroes
401 * it out, leaving it usable for a further drm_property_info_populate() or
402 * drm_property_info_free().
403 *
404 * @param info DRM property info array
405 * @param num_props Number of entries in array to free
406 */
407void
408drm_property_info_free(struct drm_property_info *info, int num_props)
409{
410 int i;
411
412 for (i = 0; i < num_props; i++)
413 free(info[i].enum_values);
414
415 memset(info, 0, sizeof(*info) * num_props);
416}
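/*
 * Editor's sketch of the populate/free lifecycle for a plane, assuming a
 * WDRM_PLANE__COUNT terminator in drm-internal.h and `props` from
 * drmModeObjectGetProperties():
 *
 *	struct drm_property_info info[WDRM_PLANE__COUNT];
 *
 *	memset(info, 0, sizeof(info));
 *	drm_property_info_populate(b, plane_props, info,
 *				   WDRM_PLANE__COUNT, props);
 *	... use drm_property_get_value() / drm_property_get_range_values() ...
 *	drm_property_info_free(info, WDRM_PLANE__COUNT);
 */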
417
418static inline uint32_t *
419formats_ptr(struct drm_format_modifier_blob *blob)
420{
421 return (uint32_t *)(((char *)blob) + blob->formats_offset);
422}
423
424static inline struct drm_format_modifier *
425modifiers_ptr(struct drm_format_modifier_blob *blob)
426{
427 return (struct drm_format_modifier *)
428 (((char *)blob) + blob->modifiers_offset);
429}
430
431/**
432 * Populates the plane's formats array, using either the IN_FORMATS blob
433 * property (if available), or the plane's format list if not.
434 */
435int
436drm_plane_populate_formats(struct drm_plane *plane, const drmModePlane *kplane,
437 const drmModeObjectProperties *props,
438 const bool use_modifiers)
439{
440 unsigned i;
441 drmModePropertyBlobRes *blob;
442 struct drm_format_modifier_blob *fmt_mod_blob;
443 struct drm_format_modifier *blob_modifiers;
444 uint32_t *blob_formats;
445 uint32_t blob_id;
446
447 if (!use_modifiers)
448 goto fallback;
449
450 blob_id = drm_property_get_value(&plane->props[WDRM_PLANE_IN_FORMATS],
451 props,
452 0);
453 if (blob_id == 0)
454 goto fallback;
455
456 blob = drmModeGetPropertyBlob(plane->backend->drm.fd, blob_id);
457 if (!blob)
458 goto fallback;
459
460 fmt_mod_blob = blob->data;
461 blob_formats = formats_ptr(fmt_mod_blob);
462 blob_modifiers = modifiers_ptr(fmt_mod_blob);
463
464 if (plane->count_formats != fmt_mod_blob->count_formats) {
465 weston_log("DRM backend: format count differs between "
466 "plane (%d) and IN_FORMATS (%d)\n",
467 plane->count_formats,
468 fmt_mod_blob->count_formats);
469 weston_log("This represents a kernel bug; Weston is "
470 "unable to continue.\n");
471 abort();
472 }
473
474 for (i = 0; i < fmt_mod_blob->count_formats; i++) {
475 uint32_t count_modifiers = 0;
476 uint64_t *modifiers = NULL;
477 unsigned j;
478
479 for (j = 0; j < fmt_mod_blob->count_modifiers; j++) {
480 struct drm_format_modifier *mod = &blob_modifiers[j];
481
482 if ((i < mod->offset) || (i > mod->offset + 63))
483 continue;
484 if (!(mod->formats & (1ULL << (i - mod->offset))))
485 continue;
486
487 modifiers = realloc(modifiers,
488 (count_modifiers + 1) *
489 sizeof(modifiers[0]));
490 assert(modifiers);
491 modifiers[count_modifiers++] = mod->modifier;
492 }
493
494 if (count_modifiers == 0) {
495 modifiers = malloc(sizeof(*modifiers));
496 *modifiers = DRM_FORMAT_MOD_LINEAR;
497 count_modifiers = 1;
498 }
499
500 plane->formats[i].format = blob_formats[i];
501 plane->formats[i].modifiers = modifiers;
502 plane->formats[i].count_modifiers = count_modifiers;
503 }
504
505 drmModeFreePropertyBlob(blob);
506
507 return 0;
508
509fallback:
510 /* No IN_FORMATS blob available, so just use the plane's format list. */
511 assert(plane->count_formats == kplane->count_formats);
512 for (i = 0; i < kplane->count_formats; i++) {
513 plane->formats[i].format = kplane->formats[i];
514 plane->formats[i].modifiers = malloc(sizeof(uint64_t));
515 plane->formats[i].modifiers[0] = DRM_FORMAT_MOD_LINEAR;
516 plane->formats[i].count_modifiers = 1;
517 }
518
519 return 0;
520}
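/*
 * Editor's sketch: once populated, a (format, modifier) support query
 * against the cached table could look like this (illustrative helper, not
 * an upstream function):
 *
 *	static bool
 *	drm_plane_supports(struct drm_plane *p, uint32_t fmt, uint64_t mod)
 *	{
 *		for (unsigned f = 0; f < p->count_formats; f++) {
 *			if (p->formats[f].format != fmt)
 *				continue;
 *			for (unsigned m = 0; m < p->formats[f].count_modifiers; m++)
 *				if (p->formats[f].modifiers[m] == mod)
 *					return true;
 *		}
 *		return false;
 *	}
 */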
521
522void
523drm_output_set_gamma(struct weston_output *output_base,
524 uint16_t size, uint16_t *r, uint16_t *g, uint16_t *b)
525{
526 int rc;
527 struct drm_output *output = to_drm_output(output_base);
528 struct drm_backend *backend =
529 to_drm_backend(output->base.compositor);
530
531 /* check */
532 if (output_base->gamma_size != size)
533 return;
534
535 rc = drmModeCrtcSetGamma(backend->drm.fd,
536 output->crtc_id,
537 size, r, g, b);
538 if (rc)
539 weston_log("set gamma failed: %s\n", strerror(errno));
540}
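/*
 * Editor's sketch: a caller could program an identity gamma ramp through
 * this hook roughly as follows (illustrative only; assumes `output_base`
 * with gamma_size == size, size > 1, and ignores allocation failure):
 *
 *	uint16_t *lut = calloc(3 * size, sizeof(*lut));
 *
 *	for (uint32_t i = 0; i < size; i++)
 *		lut[i] = lut[size + i] = lut[2 * size + i] =
 *			i * 0xffff / (size - 1);
 *	drm_output_set_gamma(output_base, size,
 *			     lut, lut + size, lut + 2 * size);
 *	free(lut);
 */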
541
542/**
543 * Mark an output state as current on the output, i.e. it has been
544 * submitted to the kernel. The mode argument determines whether this
545 * update will be applied synchronously (e.g. when calling drmModeSetCrtc),
546 * or asynchronously (in which case we wait for events to complete).
547 */
548static void
549drm_output_assign_state(struct drm_output_state *state,
550 enum drm_state_apply_mode mode)
551{
552 struct drm_output *output = state->output;
553 struct drm_backend *b = to_drm_backend(output->base.compositor);
554 struct drm_plane_state *plane_state;
555 struct drm_head *head;
556
557 assert(!output->state_last);
558
559 if (mode == DRM_STATE_APPLY_ASYNC)
560 output->state_last = output->state_cur;
561 else
562 drm_output_state_free(output->state_cur);
563
564 wl_list_remove(&state->link);
565 wl_list_init(&state->link);
566 state->pending_state = NULL;
567
568 output->state_cur = state;
569
570 if (b->atomic_modeset && mode == DRM_STATE_APPLY_ASYNC) {
571 drm_debug(b, "\t[CRTC:%u] setting pending flip\n", output->crtc_id);
572 output->atomic_complete_pending = true;
573 }
574
575 if (b->atomic_modeset &&
576 state->protection == WESTON_HDCP_DISABLE)
577 wl_list_for_each(head, &output->base.head_list, base.output_link)
578 weston_head_set_content_protection_status(&head->base,
579 WESTON_HDCP_DISABLE);
580
581 /* Replace state_cur on each affected plane with the new state, being
582 * careful to dispose of orphaned (but only orphaned) previous state.
583 * If the previous state is not orphaned (still has an output_state
584 * attached), it will be disposed of by freeing the output_state. */
585 wl_list_for_each(plane_state, &state->plane_list, link) {
586 struct drm_plane *plane = plane_state->plane;
587
588 if (plane->state_cur && !plane->state_cur->output_state)
589 drm_plane_state_free(plane->state_cur, true);
590 plane->state_cur = plane_state;
591
592 if (mode != DRM_STATE_APPLY_ASYNC) {
593 plane_state->complete = true;
594 continue;
595 }
596
597 if (b->atomic_modeset)
598 continue;
599
600 assert(plane->type != WDRM_PLANE_TYPE_OVERLAY);
601 if (plane->type == WDRM_PLANE_TYPE_PRIMARY)
602 output->page_flip_pending = true;
603 }
604}
605
606static void
607drm_output_set_cursor(struct drm_output_state *output_state)
608{
609 struct drm_output *output = output_state->output;
610 struct drm_backend *b = to_drm_backend(output->base.compositor);
611 struct drm_plane *plane = output->cursor_plane;
612 struct drm_plane_state *state;
613 uint32_t handle;
614
615 if (!plane)
616 return;
617
618 state = drm_output_state_get_existing_plane(output_state, plane);
619 if (!state)
620 return;
621
622 if (!state->fb) {
623 pixman_region32_fini(&plane->base.damage);
624 pixman_region32_init(&plane->base.damage);
625 drmModeSetCursor(b->drm.fd, output->crtc_id, 0, 0, 0);
626 return;
627 }
628
629 assert(state->fb == output->gbm_cursor_fb[output->current_cursor]);
630 assert(!plane->state_cur->output || plane->state_cur->output == output);
631
632 handle = output->gbm_cursor_handle[output->current_cursor];
633 if (plane->state_cur->fb != state->fb) {
634 if (drmModeSetCursor(b->drm.fd, output->crtc_id, handle,
635 b->cursor_width, b->cursor_height)) {
636 weston_log("failed to set cursor: %s\n",
637 strerror(errno));
638 goto err;
639 }
640 }
641
642 pixman_region32_fini(&plane->base.damage);
643 pixman_region32_init(&plane->base.damage);
644
645 if (drmModeMoveCursor(b->drm.fd, output->crtc_id,
646 state->dest_x, state->dest_y)) {
647 weston_log("failed to move cursor: %s\n", strerror(errno));
648 goto err;
649 }
650
651 return;
652
653err:
654 b->cursors_are_broken = true;
655 drmModeSetCursor(b->drm.fd, output->crtc_id, 0, 0, 0);
656}
657
658static int
659drm_output_apply_state_legacy(struct drm_output_state *state)
660{
661 struct drm_output *output = state->output;
662 struct drm_backend *backend = to_drm_backend(output->base.compositor);
663 struct drm_plane *scanout_plane = output->scanout_plane;
664 struct drm_property_info *dpms_prop;
665 struct drm_plane_state *scanout_state;
666 struct drm_mode *mode;
667 struct drm_head *head;
668 const struct pixel_format_info *pinfo = NULL;
669 uint32_t connectors[MAX_CLONED_CONNECTORS];
670 int n_conn = 0;
671 struct timespec now;
672 int ret = 0;
673
674 wl_list_for_each(head, &output->base.head_list, base.output_link) {
675 assert(n_conn < MAX_CLONED_CONNECTORS);
676 connectors[n_conn++] = head->connector_id;
677 }
678
679 /* If disable_planes is set then assign_planes() wasn't
680 * called for this render, so we could still have a stale
681 * cursor plane set up.
682 */
683 if (output->base.disable_planes) {
684 output->cursor_view = NULL;
685 if (output->cursor_plane) {
686 output->cursor_plane->base.x = INT32_MIN;
687 output->cursor_plane->base.y = INT32_MIN;
688 }
689 }
690
691 if (state->dpms != WESTON_DPMS_ON) {
692 if (output->cursor_plane) {
693 ret = drmModeSetCursor(backend->drm.fd, output->crtc_id,
694 0, 0, 0);
695 if (ret)
696 weston_log("drmModeSetCursor failed disable: %s\n",
697 strerror(errno));
698 }
699
700 ret = drmModeSetCrtc(backend->drm.fd, output->crtc_id, 0, 0, 0,
701 NULL, 0, NULL);
702 if (ret)
703 weston_log("drmModeSetCrtc failed disabling: %s\n",
704 strerror(errno));
705
706 drm_output_assign_state(state, DRM_STATE_APPLY_SYNC);
707 weston_compositor_read_presentation_clock(output->base.compositor, &now);
708 drm_output_update_complete(output,
709 WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION,
710 now.tv_sec, now.tv_nsec / 1000);
711
712 return 0;
713 }
714
715 scanout_state =
716 drm_output_state_get_existing_plane(state, scanout_plane);
717
718 /* The legacy SetCrtc API doesn't allow us to do scaling, and the
719 * legacy PageFlip API doesn't allow us to do clipping either. */
720 assert(scanout_state->src_x == 0);
721 assert(scanout_state->src_y == 0);
722 assert(scanout_state->src_w ==
723 (unsigned) (output->base.current_mode->width << 16));
724 assert(scanout_state->src_h ==
725 (unsigned) (output->base.current_mode->height << 16));
726 assert(scanout_state->dest_x == 0);
727 assert(scanout_state->dest_y == 0);
728 assert(scanout_state->dest_w == scanout_state->src_w >> 16);
729 assert(scanout_state->dest_h == scanout_state->src_h >> 16);
730 /* The legacy SetCrtc API doesn't support fences */
731 assert(scanout_state->in_fence_fd == -1);
732
733 mode = to_drm_mode(output->base.current_mode);
734 if (backend->state_invalid ||
735 !scanout_plane->state_cur->fb ||
736 scanout_plane->state_cur->fb->strides[0] !=
737 scanout_state->fb->strides[0]) {
738
739 ret = drmModeSetCrtc(backend->drm.fd, output->crtc_id,
740 scanout_state->fb->fb_id,
741 0, 0,
742 connectors, n_conn,
743 &mode->mode_info);
744 if (ret) {
745 weston_log("set mode failed: %s\n", strerror(errno));
746 goto err;
747 }
748 }
749
750 pinfo = scanout_state->fb->format;
751 drm_debug(backend, "\t[CRTC:%u, PLANE:%u] FORMAT: %s\n",
752 output->crtc_id, scanout_state->plane->plane_id,
753 pinfo ? pinfo->drm_format_name : "UNKNOWN");
754
755 if (drmModePageFlip(backend->drm.fd, output->crtc_id,
756 scanout_state->fb->fb_id,
757 DRM_MODE_PAGE_FLIP_EVENT, output) < 0) {
758 weston_log("queueing pageflip failed: %s\n", strerror(errno));
759 goto err;
760 }
761
762 assert(!output->page_flip_pending);
763
764 if (output->pageflip_timer)
765 wl_event_source_timer_update(output->pageflip_timer,
766 backend->pageflip_timeout);
767
768 drm_output_set_cursor(state);
769
770 if (state->dpms != output->state_cur->dpms) {
771 wl_list_for_each(head, &output->base.head_list, base.output_link) {
772 dpms_prop = &head->props_conn[WDRM_CONNECTOR_DPMS];
773 if (dpms_prop->prop_id == 0)
774 continue;
775
776 ret = drmModeConnectorSetProperty(backend->drm.fd,
777 head->connector_id,
778 dpms_prop->prop_id,
779 state->dpms);
780 if (ret) {
781 weston_log("DRM: DPMS: failed property set for %s\n",
782 head->base.name);
783 }
784 }
785 }
786
787 drm_output_assign_state(state, DRM_STATE_APPLY_ASYNC);
788
789 return 0;
790
791err:
792 output->cursor_view = NULL;
793 drm_output_state_free(state);
794 return -1;
795}
796
797static int
798crtc_add_prop(drmModeAtomicReq *req, struct drm_output *output,
799 enum wdrm_crtc_property prop, uint64_t val)
800{
801 struct drm_property_info *info = &output->props_crtc[prop];
802 int ret;
803
804 if (info->prop_id == 0)
805 return -1;
806
807 ret = drmModeAtomicAddProperty(req, output->crtc_id, info->prop_id,
808 val);
809 drm_debug(output->backend, "\t\t\t[CRTC:%lu] %lu (%s) -> %llu (0x%llx)\n",
810 (unsigned long) output->crtc_id,
811 (unsigned long) info->prop_id, info->name,
812 (unsigned long long) val, (unsigned long long) val);
813 return (ret <= 0) ? -1 : 0;
814}
815
816static int
817connector_add_prop(drmModeAtomicReq *req, struct drm_head *head,
818 enum wdrm_connector_property prop, uint64_t val)
819{
820 struct drm_property_info *info = &head->props_conn[prop];
821 int ret;
822
823 if (info->prop_id == 0)
824 return -1;
825
826 ret = drmModeAtomicAddProperty(req, head->connector_id,
827 info->prop_id, val);
828 drm_debug(head->backend, "\t\t\t[CONN:%lu] %lu (%s) -> %llu (0x%llx)\n",
829 (unsigned long) head->connector_id,
830 (unsigned long) info->prop_id, info->name,
831 (unsigned long long) val, (unsigned long long) val);
832 return (ret <= 0) ? -1 : 0;
833}
834
835static int
836plane_add_prop(drmModeAtomicReq *req, struct drm_plane *plane,
837 enum wdrm_plane_property prop, uint64_t val)
838{
839 struct drm_property_info *info = &plane->props[prop];
840 int ret;
841
842 if (info->prop_id == 0)
843 return -1;
844
845 ret = drmModeAtomicAddProperty(req, plane->plane_id, info->prop_id,
846 val);
847 drm_debug(plane->backend, "\t\t\t[PLANE:%lu] %lu (%s) -> %llu (0x%llx)\n",
848 (unsigned long) plane->plane_id,
849 (unsigned long) info->prop_id, info->name,
850 (unsigned long long) val, (unsigned long long) val);
851 return (ret <= 0) ? -1 : 0;
852}
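/*
 * Editor's sketch: the three *_add_prop() helpers share the same return
 * convention, so callers fold failures together and commit once, e.g.
 * (illustrative; `fb_id` is a placeholder framebuffer ID):
 *
 *	drmModeAtomicReq *req = drmModeAtomicAlloc();
 *	int err = 0;
 *
 *	err |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 1);
 *	err |= plane_add_prop(req, plane, WDRM_PLANE_FB_ID, fb_id);
 *	if (err == 0)
 *		err = drmModeAtomicCommit(b->drm.fd, req,
 *					  DRM_MODE_ATOMIC_TEST_ONLY, NULL);
 *	drmModeAtomicFree(req);
 */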
853
854static bool
855drm_head_has_prop(struct drm_head *head,
856 enum wdrm_connector_property prop)
857{
858 if (head && head->props_conn[prop].prop_id != 0)
859 return true;
860
861 return false;
862}
863
864/*
865 * This function converts a protection request from weston_hdcp_protection
866 * into the corresponding DRM values. These values can be set in the
867 * "Content Protection" & "HDCP Content Type" connector properties.
868 */
869static void
870get_drm_protection_from_weston(enum weston_hdcp_protection weston_protection,
871 enum wdrm_content_protection_state *drm_protection,
872 enum wdrm_hdcp_content_type *drm_cp_type)
873{
874
875 switch (weston_protection) {
876 case WESTON_HDCP_DISABLE:
877 *drm_protection = WDRM_CONTENT_PROTECTION_UNDESIRED;
878 *drm_cp_type = WDRM_HDCP_CONTENT_TYPE0;
879 break;
880 case WESTON_HDCP_ENABLE_TYPE_0:
881 *drm_protection = WDRM_CONTENT_PROTECTION_DESIRED;
882 *drm_cp_type = WDRM_HDCP_CONTENT_TYPE0;
883 break;
884 case WESTON_HDCP_ENABLE_TYPE_1:
885 *drm_protection = WDRM_CONTENT_PROTECTION_DESIRED;
886 *drm_cp_type = WDRM_HDCP_CONTENT_TYPE1;
887 break;
888 default:
889 assert(0 && "bad weston_hdcp_protection");
890 }
891}
892
893static void
894drm_head_set_hdcp_property(struct drm_head *head,
895 enum weston_hdcp_protection protection,
896 drmModeAtomicReq *req)
897{
898 int ret;
899 enum wdrm_content_protection_state drm_protection;
900 enum wdrm_hdcp_content_type drm_cp_type;
901 struct drm_property_enum_info *enum_info;
902 uint64_t prop_val;
903
904 get_drm_protection_from_weston(protection, &drm_protection,
905 &drm_cp_type);
906
907 if (!drm_head_has_prop(head, WDRM_CONNECTOR_CONTENT_PROTECTION))
908 return;
909
910 /*
911 * Content-type property is not exposed for platforms not supporting
912 * HDCP2.2, therefore, type-1 cannot be supported. The type-0 content
913 * still can be supported if the content-protection property is exposed.
914 */
915 if (!drm_head_has_prop(head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE) &&
916 drm_cp_type != WDRM_HDCP_CONTENT_TYPE0)
917 return;
918
919 enum_info = head->props_conn[WDRM_CONNECTOR_CONTENT_PROTECTION].enum_values;
920 prop_val = enum_info[drm_protection].value;
921 ret = connector_add_prop(req, head, WDRM_CONNECTOR_CONTENT_PROTECTION,
922 prop_val);
923 assert(ret == 0);
924
925 if (!drm_head_has_prop(head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE))
926 return;
927
928 enum_info = head->props_conn[WDRM_CONNECTOR_HDCP_CONTENT_TYPE].enum_values;
929 prop_val = enum_info[drm_cp_type].value;
930 ret = connector_add_prop(req, head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE,
931 prop_val);
932 assert(ret == 0);
933}
934
935static int
936drm_output_apply_state_atomic(struct drm_output_state *state,
937 drmModeAtomicReq *req,
938 uint32_t *flags)
939{
940 struct drm_output *output = state->output;
941 struct drm_backend *b = to_drm_backend(output->base.compositor);
942 struct drm_plane_state *plane_state;
943 struct drm_mode *current_mode = to_drm_mode(output->base.current_mode);
944 struct drm_head *head;
945 int ret = 0;
946
947 drm_debug(b, "\t\t[atomic] %s output %lu (%s) state\n",
948 (*flags & DRM_MODE_ATOMIC_TEST_ONLY) ? "testing" : "applying",
949 (unsigned long) output->base.id, output->base.name);
950
951 if (state->dpms != output->state_cur->dpms) {
952 drm_debug(b, "\t\t\t[atomic] DPMS state differs, modeset OK\n");
953 *flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
954 }
955
956 if (state->dpms == WESTON_DPMS_ON) {
957 ret = drm_mode_ensure_blob(b, current_mode);
958 if (ret != 0)
959 return ret;
960
961 ret |= crtc_add_prop(req, output, WDRM_CRTC_MODE_ID,
962 current_mode->blob_id);
963 ret |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 1);
964
965 /* No need for the DPMS property, since it is implicit in
966 * routing and CRTC activity. */
967 wl_list_for_each(head, &output->base.head_list, base.output_link) {
968 ret |= connector_add_prop(req, head, WDRM_CONNECTOR_CRTC_ID,
969 output->crtc_id);
970 }
971 } else {
972 ret |= crtc_add_prop(req, output, WDRM_CRTC_MODE_ID, 0);
973 ret |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 0);
974
975 /* No need for the DPMS property, since it is implicit in
976 * routing and CRTC activity. */
977 wl_list_for_each(head, &output->base.head_list, base.output_link)
978 ret |= connector_add_prop(req, head, WDRM_CONNECTOR_CRTC_ID, 0);
979 }
980
981 wl_list_for_each(head, &output->base.head_list, base.output_link)
982 drm_head_set_hdcp_property(head, state->protection, req);
983
984 if (ret != 0) {
985 weston_log("couldn't set atomic CRTC/connector state\n");
986 return ret;
987 }
988
989 wl_list_for_each(plane_state, &state->plane_list, link) {
990 struct drm_plane *plane = plane_state->plane;
991 const struct pixel_format_info *pinfo = NULL;
992
993 ret |= plane_add_prop(req, plane, WDRM_PLANE_FB_ID,
994 plane_state->fb ? plane_state->fb->fb_id : 0);
995 ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID,
996 plane_state->fb ? output->crtc_id : 0);
997 ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_X,
998 plane_state->src_x);
999 ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_Y,
1000 plane_state->src_y);
1001 ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_W,
1002 plane_state->src_w);
1003 ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_H,
1004 plane_state->src_h);
1005 ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_X,
1006 plane_state->dest_x);
1007 ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_Y,
1008 plane_state->dest_y);
1009 ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_W,
1010 plane_state->dest_w);
1011 ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_H,
1012 plane_state->dest_h);
1013 if (plane->props[WDRM_PLANE_FB_DAMAGE_CLIPS].prop_id != 0)
1014 ret |= plane_add_prop(req, plane, WDRM_PLANE_FB_DAMAGE_CLIPS,
1015 plane_state->damage_blob_id);
1016
1017 if (plane_state->fb && plane_state->fb->format)
1018 pinfo = plane_state->fb->format;
1019
1020 drm_debug(plane->backend, "\t\t\t[PLANE:%lu] FORMAT: %s\n",
1021 (unsigned long) plane->plane_id,
1022 pinfo ? pinfo->drm_format_name : "UNKNOWN");
1023
1024 if (plane_state->in_fence_fd >= 0) {
1025 ret |= plane_add_prop(req, plane,
1026 WDRM_PLANE_IN_FENCE_FD,
1027 plane_state->in_fence_fd);
1028 }
1029
1030 /* Note that 'invented' zpos values are set as immutable. */
1031 if (plane_state->zpos != DRM_PLANE_ZPOS_INVALID_PLANE &&
1032 plane_state->plane->zpos_min != plane_state->plane->zpos_max)
1033 ret |= plane_add_prop(req, plane,
1034 WDRM_PLANE_ZPOS,
1035 plane_state->zpos);
1036
1037 if (ret != 0) {
1038 weston_log("couldn't set plane state\n");
1039 return ret;
1040 }
1041 }
1042
1043 return 0;
1044}
1045
1046/**
1047 * Helper function used only by drm_pending_state_apply, with the same
1048 * guarantees and constraints as that function.
1049 */
1050static int
1051drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
1052 enum drm_state_apply_mode mode)
1053{
1054 struct drm_backend *b = pending_state->backend;
1055 struct drm_output_state *output_state, *tmp;
1056 struct drm_plane *plane;
1057 drmModeAtomicReq *req = drmModeAtomicAlloc();
1058 uint32_t flags;
1059 int ret = 0;
1060
1061 if (!req)
1062 return -1;
1063
1064 switch (mode) {
1065 case DRM_STATE_APPLY_SYNC:
1066 flags = 0;
1067 break;
1068 case DRM_STATE_APPLY_ASYNC:
1069 flags = DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK;
1070 break;
1071 case DRM_STATE_TEST_ONLY:
1072 flags = DRM_MODE_ATOMIC_TEST_ONLY;
1073 break;
1074 }
1075
1076 if (b->state_invalid) {
1077 struct weston_head *head_base;
1078 struct drm_head *head;
1079 uint32_t *unused;
1080 int err;
1081
1082 drm_debug(b, "\t\t[atomic] previous state invalid; "
1083 "starting with fresh state\n");
1084
1085 /* If we need to reset all our state (e.g. because we've
1086 * just started, or just been VT-switched in), explicitly
1087 * disable all the CRTCs and connectors we aren't using. */
1088 wl_list_for_each(head_base,
1089 &b->compositor->head_list, compositor_link) {
1090 struct drm_property_info *info;
1091
1092 if (weston_head_is_enabled(head_base))
1093 continue;
1094
1095 head = to_drm_head(head_base);
1096
1097 drm_debug(b, "\t\t[atomic] disabling inactive head %s\n",
1098 head_base->name);
1099
1100 info = &head->props_conn[WDRM_CONNECTOR_CRTC_ID];
1101 err = drmModeAtomicAddProperty(req, head->connector_id,
1102 info->prop_id, 0);
1103 drm_debug(b, "\t\t\t[CONN:%lu] %lu (%s) -> 0\n",
1104 (unsigned long) head->connector_id,
1105 (unsigned long) info->prop_id,
1106 info->name);
1107 if (err <= 0)
1108 ret = -1;
1109 }
1110
1111 wl_array_for_each(unused, &b->unused_crtcs) {
1112 struct drm_property_info infos[WDRM_CRTC__COUNT];
1113 struct drm_property_info *info;
1114 drmModeObjectProperties *props;
1115 uint64_t active;
1116
1117 memset(infos, 0, sizeof(infos));
1118
1119 /* We can't emit a disable on a CRTC that's already
1120 * off, as the kernel will refuse to generate an event
1121 * for an off->off state and fail the commit.
1122 */
1123 props = drmModeObjectGetProperties(b->drm.fd,
1124 *unused,
1125 DRM_MODE_OBJECT_CRTC);
1126 if (!props) {
1127 ret = -1;
1128 continue;
1129 }
1130
1131 drm_property_info_populate(b, crtc_props, infos,
1132 WDRM_CRTC__COUNT,
1133 props);
1134
1135 info = &infos[WDRM_CRTC_ACTIVE];
1136 active = drm_property_get_value(info, props, 0);
1137 drmModeFreeObjectProperties(props);
1138 if (active == 0) {
1139 drm_property_info_free(infos, WDRM_CRTC__COUNT);
1140 continue;
1141 }
1142
1143 drm_debug(b, "\t\t[atomic] disabling unused CRTC %lu\n",
1144 (unsigned long) *unused);
1145
1146 drm_debug(b, "\t\t\t[CRTC:%lu] %lu (%s) -> 0\n",
1147 (unsigned long) *unused,
1148 (unsigned long) info->prop_id, info->name);
1149 err = drmModeAtomicAddProperty(req, *unused,
1150 info->prop_id, 0);
1151 if (err <= 0)
1152 ret = -1;
1153
1154 info = &infos[WDRM_CRTC_MODE_ID];
1155 drm_debug(b, "\t\t\t[CRTC:%lu] %lu (%s) -> 0\n",
1156 (unsigned long) *unused,
1157 (unsigned long) info->prop_id, info->name);
1158 err = drmModeAtomicAddProperty(req, *unused,
1159 info->prop_id, 0);
1160 if (err <= 0)
1161 ret = -1;
1162
1163 drm_property_info_free(infos, WDRM_CRTC__COUNT);
1164 }
1165
1166 /* Disable all the planes; planes which are being used will
1167 * override this state in the output-state application. */
1168 wl_list_for_each(plane, &b->plane_list, link) {
1169 drm_debug(b, "\t\t[atomic] starting with plane %lu disabled\n",
1170 (unsigned long) plane->plane_id);
1171 plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID, 0);
1172 plane_add_prop(req, plane, WDRM_PLANE_FB_ID, 0);
1173 }
1174
1175 flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
1176 }
1177
1178 wl_list_for_each(output_state, &pending_state->output_list, link) {
1179 if (output_state->output->virtual)
1180 continue;
1181 if (mode == DRM_STATE_APPLY_SYNC)
1182 assert(output_state->dpms == WESTON_DPMS_OFF);
1183 ret |= drm_output_apply_state_atomic(output_state, req, &flags);
1184 }
1185
1186 if (ret != 0) {
1187 weston_log("atomic: couldn't compile atomic state\n");
1188 goto out;
1189 }
1190
1191 ret = drmModeAtomicCommit(b->drm.fd, req, flags, b);
1192 drm_debug(b, "[atomic] drmModeAtomicCommit\n");
1193
1194 /* Test commits do not take ownership of the state; return
1195 * without freeing here. */
1196 if (mode == DRM_STATE_TEST_ONLY) {
1197 drmModeAtomicFree(req);
1198 return ret;
1199 }
1200
1201 if (ret != 0) {
1202 weston_log("atomic: couldn't commit new state: %s\n",
1203 strerror(errno));
1204 goto out;
1205 }
1206
1207 wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
1208 link)
1209 drm_output_assign_state(output_state, mode);
1210
1211 b->state_invalid = false;
1212
1213 assert(wl_list_empty(&pending_state->output_list));
1214
1215out:
1216 drmModeAtomicFree(req);
1217 drm_pending_state_free(pending_state);
1218 return ret;
1219}
1220
1221/**
1222 * Tests a pending state, to see if the kernel will accept the update as
1223 * constructed.
1224 *
1225 * Using atomic modesetting, the kernel performs the same checks as it would
1226 * on a real commit, returning success or failure without actually modifying
1227 * the running state. It does not return -EBUSY if there are pending updates
1228 * in flight, so states may be tested at any point; however, this means a
1229 * state which passed testing may fail on a real commit if the timing is not
1230 * respected (e.g. committing before the previous commit has completed).
1231 *
1232 * Without atomic modesetting, we have no way to check, so we optimistically
1233 * claim it will work.
1234 *
1235 * Unlike drm_pending_state_apply() and drm_pending_state_apply_sync(), this
1236 * function does _not_ take ownership of pending_state, nor does it clear
1237 * state_invalid.
1238 */
1239int
1240drm_pending_state_test(struct drm_pending_state *pending_state)
1241{
1242 struct drm_backend *b = pending_state->backend;
1243
1244 if (b->atomic_modeset)
1245 return drm_pending_state_apply_atomic(pending_state,
1246 DRM_STATE_TEST_ONLY);
1247
1248 /* We have no way to test state before application on the legacy
1249 * modesetting API, so just claim it succeeded. */
1250 return 0;
1251}
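/*
 * Editor's sketch of the intended test-then-apply flow (illustrative; the
 * fallback policy is up to the caller):
 *
 *	if (drm_pending_state_test(pending_state) != 0) {
 *		// rebuild pending_state with a simpler plane configuration
 *	}
 *	drm_pending_state_apply(pending_state);  // takes ownership
 */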
1252
1253/**
1254 * Applies all of a pending_state asynchronously: the primary entry point for
1255 * applying KMS state to a device. Updates the state for all outputs in the
1256 * pending_state, as well as disabling any unclaimed outputs.
1257 *
1258 * Unconditionally takes ownership of pending_state, and clears state_invalid.
1259 */
1260int
1261drm_pending_state_apply(struct drm_pending_state *pending_state)
1262{
1263 struct drm_backend *b = pending_state->backend;
1264 struct drm_output_state *output_state, *tmp;
1265 uint32_t *unused;
1266
1267 if (b->atomic_modeset)
1268 return drm_pending_state_apply_atomic(pending_state,
1269 DRM_STATE_APPLY_ASYNC);
1270
1271 if (b->state_invalid) {
1272 /* If we need to reset all our state (e.g. because we've
1273 * just started, or just been VT-switched in), explicitly
1274 * disable all the CRTCs we aren't using. This also disables
1275 * all connectors on these CRTCs, so we don't need to do that
1276 * separately with the pre-atomic API. */
1277 wl_array_for_each(unused, &b->unused_crtcs)
1278 drmModeSetCrtc(b->drm.fd, *unused, 0, 0, 0, NULL, 0,
1279 NULL);
1280 }
1281
1282 wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
1283 link) {
1284 struct drm_output *output = output_state->output;
1285 int ret;
1286
1287 if (output->virtual) {
1288 drm_output_assign_state(output_state,
1289 DRM_STATE_APPLY_ASYNC);
1290 continue;
1291 }
1292
1293 ret = drm_output_apply_state_legacy(output_state);
1294 if (ret != 0) {
1295 weston_log("Couldn't apply state for output %s\n",
1296 output->base.name);
1297 }
1298 }
1299
1300 b->state_invalid = false;
1301
1302 assert(wl_list_empty(&pending_state->output_list));
1303
1304 drm_pending_state_free(pending_state);
1305
1306 return 0;
1307}
1308
1309/**
1310 * The synchronous version of drm_pending_state_apply. May only be used to
1311 * disable outputs. Does so synchronously: the request is guaranteed to have
1312 * completed on return, and the output will not be touched afterwards.
1313 *
1314 * Unconditionally takes ownership of pending_state, and clears state_invalid.
1315 */
1316int
1317drm_pending_state_apply_sync(struct drm_pending_state *pending_state)
1318{
1319 struct drm_backend *b = pending_state->backend;
1320 struct drm_output_state *output_state, *tmp;
1321 uint32_t *unused;
1322
1323 if (b->atomic_modeset)
1324 return drm_pending_state_apply_atomic(pending_state,
1325 DRM_STATE_APPLY_SYNC);
1326
1327 if (b->state_invalid) {
1328 /* If we need to reset all our state (e.g. because we've
1329 * just started, or just been VT-switched in), explicitly
1330 * disable all the CRTCs we aren't using. This also disables
1331 * all connectors on these CRTCs, so we don't need to do that
1332 * separately with the pre-atomic API. */
1333 wl_array_for_each(unused, &b->unused_crtcs)
1334 drmModeSetCrtc(b->drm.fd, *unused, 0, 0, 0, NULL, 0,
1335 NULL);
1336 }
1337
1338 wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
1339 link) {
1340 int ret;
1341
1342 assert(output_state->dpms == WESTON_DPMS_OFF);
1343 ret = drm_output_apply_state_legacy(output_state);
1344 if (ret != 0) {
1345 weston_log("Couldn't apply state for output %s\n",
1346 output_state->output->base.name);
1347 }
1348 }
1349
1350 b->state_invalid = false;
1351
1352 assert(wl_list_empty(&pending_state->output_list));
1353
1354 drm_pending_state_free(pending_state);
1355
1356 return 0;
1357}
1358
1359void
1360drm_output_update_msc(struct drm_output *output, unsigned int seq)
1361{
1362 uint64_t msc_hi = output->base.msc >> 32;
1363
1364 if (seq < (output->base.msc & 0xffffffff))
1365 msc_hi++;
1366
1367 output->base.msc = (msc_hi << 32) + seq;
1368}
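/*
 * Editor's worked example: if output->base.msc was 0x00000001fffffffe and
 * the 32-bit hardware counter has wrapped around to seq = 5, then
 * 5 < 0xfffffffe, so msc_hi is bumped to 2 and the new msc becomes
 * 0x0000000200000005.
 */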
1369
1370static void
1371page_flip_handler(int fd, unsigned int frame,
1372 unsigned int sec, unsigned int usec, void *data)
1373{
1374 struct drm_output *output = data;
1375 struct drm_backend *b = to_drm_backend(output->base.compositor);
1376 uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC |
1377 WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION |
1378 WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK;
1379
1380 drm_output_update_msc(output, frame);
1381
1382 assert(!b->atomic_modeset);
1383 assert(output->page_flip_pending);
1384 output->page_flip_pending = false;
1385
1386 drm_output_update_complete(output, flags, sec, usec);
1387}
1388
1389static void
1390atomic_flip_handler(int fd, unsigned int frame, unsigned int sec,
1391 unsigned int usec, unsigned int crtc_id, void *data)
1392{
1393 struct drm_backend *b = data;
1394 struct drm_output *output = drm_output_find_by_crtc(b, crtc_id);
1395 uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC |
1396 WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION |
1397 WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK;
1398
1399 /* During the initial modeset, we can disable CRTCs which we don't
1400 * actually handle during normal operation; this will give us events
1401 * for unknown outputs. Ignore them. */
1402 if (!output || !output->base.enabled)
1403 return;
1404
1405 drm_output_update_msc(output, frame);
1406
1407 drm_debug(b, "[atomic][CRTC:%u] flip processing started\n", crtc_id);
1408 assert(b->atomic_modeset);
1409 assert(output->atomic_complete_pending);
1410 output->atomic_complete_pending = false;
1411
1412 drm_output_update_complete(output, flags, sec, usec);
1413 drm_debug(b, "[atomic][CRTC:%u] flip processing completed\n", crtc_id);
1414}
1415
1416int
1417on_drm_input(int fd, uint32_t mask, void *data)
1418{
1419 struct drm_backend *b = data;
1420 drmEventContext evctx;
1421
1422 memset(&evctx, 0, sizeof evctx);
1423 evctx.version = 3;
1424 if (b->atomic_modeset)
1425 evctx.page_flip_handler2 = atomic_flip_handler;
1426 else
1427 evctx.page_flip_handler = page_flip_handler;
1428 drmHandleEvent(fd, &evctx);
1429
1430 return 1;
1431}
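/*
 * Editor's sketch: this handler is intended to be registered for the DRM fd
 * on the compositor's event loop, roughly (assuming `loop` is the
 * wl_event_loop and the backend keeps the returned wl_event_source):
 *
 *	source = wl_event_loop_add_fd(loop, b->drm.fd, WL_EVENT_READABLE,
 *				      on_drm_input, b);
 */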
1432
1433int
1434init_kms_caps(struct drm_backend *b)
1435{
1436 uint64_t cap;
1437 int ret;
1438 clockid_t clk_id;
1439
1440 weston_log("using %s\n", b->drm.filename);
1441
1442 ret = drmGetCap(b->drm.fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap);
1443 if (ret == 0 && cap == 1)
1444 clk_id = CLOCK_MONOTONIC;
1445 else
1446 clk_id = CLOCK_REALTIME;
1447
1448 if (weston_compositor_set_presentation_clock(b->compositor, clk_id) < 0) {
1449 weston_log("Error: failed to set presentation clock %d.\n",
1450 clk_id);
1451 return -1;
1452 }
1453
1454 ret = drmGetCap(b->drm.fd, DRM_CAP_CURSOR_WIDTH, &cap);
1455 if (ret == 0)
1456 b->cursor_width = cap;
1457 else
1458 b->cursor_width = 64;
1459
1460 ret = drmGetCap(b->drm.fd, DRM_CAP_CURSOR_HEIGHT, &cap);
1461 if (ret == 0)
1462 b->cursor_height = cap;
1463 else
1464 b->cursor_height = 64;
1465
1466 if (!getenv("WESTON_DISABLE_UNIVERSAL_PLANES")) {
1467 ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
1468 b->universal_planes = (ret == 0);
1469 }
1470
1471 if (b->universal_planes && !getenv("WESTON_DISABLE_ATOMIC")) {
1472 ret = drmGetCap(b->drm.fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &cap);
1473 if (ret != 0)
1474 cap = 0;
1475 ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_ATOMIC, 1);
1476 b->atomic_modeset = ((ret == 0) && (cap == 1));
1477 }
1478 weston_log("DRM: %s atomic modesetting\n",
1479 b->atomic_modeset ? "supports" : "does not support");
1480
1481 if (!getenv("WESTON_DISABLE_GBM_MODIFIERS")) {
1482 ret = drmGetCap(b->drm.fd, DRM_CAP_ADDFB2_MODIFIERS, &cap);
1483 if (ret == 0)
1484 b->fb_modifiers = cap;
1485 }
1486 weston_log("DRM: %s GBM modifiers\n",
1487 b->fb_modifiers ? "supports" : "does not support");
1488
1489 /*
1490 * KMS support for hardware planes cannot properly synchronize
1491 * without nuclear page flip. Without nuclear/atomic, hw plane
1492 * and cursor plane updates would either tear or cause extra
1493 * waits for vblanks which means dropping the compositor framerate
1494 * to a fraction. For cursors, it's not so bad, so they are
1495 * enabled.
1496 */
1497 if (!b->atomic_modeset || getenv("WESTON_FORCE_RENDERER"))
1498 b->sprites_are_broken = true;
1499
1500 ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_ASPECT_RATIO, 1);
1501 b->aspect_ratio_supported = (ret == 0);
1502 weston_log("DRM: %s picture aspect ratio\n",
1503 b->aspect_ratio_supported ? "supports" : "does not support");
1504
1505 return 0;
1506}