/*
 * Copyright © 2008-2011 Kristian Høgsberg
 * Copyright © 2011 Intel Corporation
 * Copyright © 2017, 2018 Collabora, Ltd.
 * Copyright © 2017, 2018 General Electric Company
 * Copyright (c) 2018 DisplayLink (UK) Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "config.h"

#include <stdint.h>

#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

#include <libweston/libweston.h>
#include <libweston/backend-drm.h>
#include "shared/helpers.h"
#include "drm-internal.h"
#include "pixel-formats.h"
#include "presentation-time-server-protocol.h"

#ifndef DRM_FORMAT_MOD_LINEAR
#define DRM_FORMAT_MOD_LINEAR 0
#endif

struct drm_property_enum_info plane_type_enums[] = {
        [WDRM_PLANE_TYPE_PRIMARY] = {
                .name = "Primary",
        },
        [WDRM_PLANE_TYPE_OVERLAY] = {
                .name = "Overlay",
        },
        [WDRM_PLANE_TYPE_CURSOR] = {
                .name = "Cursor",
        },
};

const struct drm_property_info plane_props[] = {
        [WDRM_PLANE_TYPE] = {
                .name = "type",
                .enum_values = plane_type_enums,
                .num_enum_values = WDRM_PLANE_TYPE__COUNT,
        },
        [WDRM_PLANE_SRC_X] = { .name = "SRC_X", },
        [WDRM_PLANE_SRC_Y] = { .name = "SRC_Y", },
        [WDRM_PLANE_SRC_W] = { .name = "SRC_W", },
        [WDRM_PLANE_SRC_H] = { .name = "SRC_H", },
        [WDRM_PLANE_CRTC_X] = { .name = "CRTC_X", },
        [WDRM_PLANE_CRTC_Y] = { .name = "CRTC_Y", },
        [WDRM_PLANE_CRTC_W] = { .name = "CRTC_W", },
        [WDRM_PLANE_CRTC_H] = { .name = "CRTC_H", },
        [WDRM_PLANE_FB_ID] = { .name = "FB_ID", },
        [WDRM_PLANE_CRTC_ID] = { .name = "CRTC_ID", },
        [WDRM_PLANE_IN_FORMATS] = { .name = "IN_FORMATS" },
        [WDRM_PLANE_IN_FENCE_FD] = { .name = "IN_FENCE_FD" },
        [WDRM_PLANE_FB_DAMAGE_CLIPS] = { .name = "FB_DAMAGE_CLIPS" },
        [WDRM_PLANE_ZPOS] = { .name = "zpos" },
};

struct drm_property_enum_info dpms_state_enums[] = {
        [WDRM_DPMS_STATE_OFF] = {
                .name = "Off",
        },
        [WDRM_DPMS_STATE_ON] = {
                .name = "On",
        },
        [WDRM_DPMS_STATE_STANDBY] = {
                .name = "Standby",
        },
        [WDRM_DPMS_STATE_SUSPEND] = {
                .name = "Suspend",
        },
};

struct drm_property_enum_info content_protection_enums[] = {
        [WDRM_CONTENT_PROTECTION_UNDESIRED] = {
                .name = "Undesired",
        },
        [WDRM_CONTENT_PROTECTION_DESIRED] = {
                .name = "Desired",
        },
        [WDRM_CONTENT_PROTECTION_ENABLED] = {
                .name = "Enabled",
        },
};

struct drm_property_enum_info hdcp_content_type_enums[] = {
        [WDRM_HDCP_CONTENT_TYPE0] = {
                .name = "HDCP Type0",
        },
        [WDRM_HDCP_CONTENT_TYPE1] = {
                .name = "HDCP Type1",
        },
};

struct drm_property_enum_info panel_orientation_enums[] = {
        [WDRM_PANEL_ORIENTATION_NORMAL] = { .name = "Normal", },
        [WDRM_PANEL_ORIENTATION_UPSIDE_DOWN] = { .name = "Upside Down", },
        [WDRM_PANEL_ORIENTATION_LEFT_SIDE_UP] = { .name = "Left Side Up", },
        [WDRM_PANEL_ORIENTATION_RIGHT_SIDE_UP] = { .name = "Right Side Up", },
};

const struct drm_property_info connector_props[] = {
        [WDRM_CONNECTOR_EDID] = { .name = "EDID" },
        [WDRM_CONNECTOR_DPMS] = {
                .name = "DPMS",
                .enum_values = dpms_state_enums,
                .num_enum_values = WDRM_DPMS_STATE__COUNT,
        },
        [WDRM_CONNECTOR_CRTC_ID] = { .name = "CRTC_ID", },
        [WDRM_CONNECTOR_NON_DESKTOP] = { .name = "non-desktop", },
        [WDRM_CONNECTOR_CONTENT_PROTECTION] = {
                .name = "Content Protection",
                .enum_values = content_protection_enums,
                .num_enum_values = WDRM_CONTENT_PROTECTION__COUNT,
        },
        [WDRM_CONNECTOR_HDCP_CONTENT_TYPE] = {
                .name = "HDCP Content Type",
                .enum_values = hdcp_content_type_enums,
                .num_enum_values = WDRM_HDCP_CONTENT_TYPE__COUNT,
        },
        [WDRM_CONNECTOR_PANEL_ORIENTATION] = {
                .name = "panel orientation",
                .enum_values = panel_orientation_enums,
                .num_enum_values = WDRM_PANEL_ORIENTATION__COUNT,
        },
};

const struct drm_property_info crtc_props[] = {
        [WDRM_CRTC_MODE_ID] = { .name = "MODE_ID", },
        [WDRM_CRTC_ACTIVE] = { .name = "ACTIVE", },
};

/**
 * Mode for drm_pending_state_apply and co.
 */
enum drm_state_apply_mode {
        DRM_STATE_APPLY_SYNC, /**< state fully processed */
        DRM_STATE_APPLY_ASYNC, /**< state pending event delivery */
        DRM_STATE_TEST_ONLY, /**< test if the state can be applied */
};
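
/*
 * As a quick orientation (mirroring how these modes are used further down in
 * this file): DRM_STATE_TEST_ONLY is used by drm_pending_state_test(),
 * DRM_STATE_APPLY_ASYNC by drm_pending_state_apply(), and
 * DRM_STATE_APPLY_SYNC by drm_pending_state_apply_sync().
 */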

/**
 * Get the current value of a KMS property
 *
 * Given a drmModeObjectGetProperties return, as well as the drm_property_info
 * for the target property, return the current value of that property,
 * with an optional default. If the property is a KMS enum type, the return
 * value will be translated into the appropriate internal enum.
 *
 * If the property is not present, the default value will be returned.
 *
 * @param info Internal structure for property to look up
 * @param props Raw KMS properties for the target object
 * @param def Value to return if property is not found
 */
uint64_t
drm_property_get_value(struct drm_property_info *info,
                       const drmModeObjectProperties *props,
                       uint64_t def)
{
        unsigned int i;

        if (info->prop_id == 0)
                return def;

        for (i = 0; i < props->count_props; i++) {
                unsigned int j;

                if (props->props[i] != info->prop_id)
                        continue;

                /* Simple (non-enum) types can return the value directly */
                if (info->num_enum_values == 0)
                        return props->prop_values[i];

                /* Map from raw value to enum value */
                for (j = 0; j < info->num_enum_values; j++) {
                        if (!info->enum_values[j].valid)
                                continue;
                        if (info->enum_values[j].value != props->prop_values[i])
                                continue;

                        return j;
                }

                /* We don't have a mapping for this enum; return default. */
                break;
        }

        return def;
}
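
/*
 * Illustrative usage (a sketch, not part of the backend): translating a
 * plane's "type" property into the wdrm_plane_type enum, with the enum count
 * doubling as an "unknown" default. 'plane' and 'props' are assumed to come
 * from the caller, and plane->props must already have been filled in by
 * drm_property_info_populate().
 *
 *      uint64_t type =
 *              drm_property_get_value(&plane->props[WDRM_PLANE_TYPE],
 *                                     props, WDRM_PLANE_TYPE__COUNT);
 *      if (type == WDRM_PLANE_TYPE_PRIMARY)
 *              ...
 */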

/**
 * Get the range values of a KMS property
 *
 * Given a drmModeObjectGetProperties return, as well as the drm_property_info
 * for the target property, return the range values (minimum and maximum)
 * advertised for that property.
 *
 * If the property is not present, or it is not a range property, NULL will
 * be returned.
 *
 * @param info Internal structure for property to look up
 * @param props Raw KMS properties for the target object
 */
uint64_t *
drm_property_get_range_values(struct drm_property_info *info,
                              const drmModeObjectProperties *props)
{
        unsigned int i;

        if (info->prop_id == 0)
                return NULL;

        for (i = 0; i < props->count_props; i++) {

                if (props->props[i] != info->prop_id)
                        continue;

                if (!(info->flags & DRM_MODE_PROP_RANGE) &&
                    !(info->flags & DRM_MODE_PROP_SIGNED_RANGE))
                        continue;

                return info->range_values;
        }

        return NULL;
}
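
/*
 * A minimal sketch (assuming plane->props has already been populated and
 * 'props' is the plane's raw property list): fetching the zpos range, where
 * range_values[0] holds the minimum and range_values[1] the maximum of a KMS
 * range property.
 *
 *      uint64_t *zpos_range =
 *              drm_property_get_range_values(&plane->props[WDRM_PLANE_ZPOS],
 *                                            props);
 *      if (zpos_range) {
 *              plane->zpos_min = zpos_range[0];
 *              plane->zpos_max = zpos_range[1];
 *      }
 */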

/**
 * Cache DRM property values
 *
 * Update a per-object array of drm_property_info structures, given the
 * DRM properties of the object.
 *
 * Call this every time an object newly appears (note that only connectors
 * can be hotplugged), the first time it is seen, or when its status changes
 * in a way which invalidates the potential property values (currently, the
 * only case for this is connector hotplug).
 *
 * This updates the property IDs and enum values within the drm_property_info
 * array.
 *
 * DRM property enum values are dynamic at runtime; the user must query the
 * property to find out the desired runtime value for a requested string
 * name. Using the 'type' field on planes as an example, there is no single
 * hardcoded constant for primary plane types; instead, the property must be
 * queried at runtime to find the value associated with the string "Primary".
 *
 * This helper queries and caches the enum values, to allow us to use a set
 * of compile-time-constant enums portably across various implementations.
 * The enum names given in each source entry's enum_values are searched for,
 * and the matching kernel values are stored in the same-indexed entry of the
 * destination array.
 *
 * @param b DRM backend object
 * @param src DRM property info array to source from
 * @param info DRM property info array to copy into
 * @param num_infos Number of entries in the source array
 * @param props DRM object properties for the object
 */
void
drm_property_info_populate(struct drm_backend *b,
                           const struct drm_property_info *src,
                           struct drm_property_info *info,
                           unsigned int num_infos,
                           drmModeObjectProperties *props)
{
        drmModePropertyRes *prop;
        unsigned i, j;

        for (i = 0; i < num_infos; i++) {
                unsigned int j;

                info[i].name = src[i].name;
                info[i].prop_id = 0;
                info[i].num_enum_values = src[i].num_enum_values;

                if (src[i].num_enum_values == 0)
                        continue;

                info[i].enum_values =
                        malloc(src[i].num_enum_values *
                               sizeof(*info[i].enum_values));
                assert(info[i].enum_values);
                for (j = 0; j < info[i].num_enum_values; j++) {
                        info[i].enum_values[j].name = src[i].enum_values[j].name;
                        info[i].enum_values[j].valid = false;
                }
        }

        for (i = 0; i < props->count_props; i++) {
                unsigned int k;

                prop = drmModeGetProperty(b->drm.fd, props->props[i]);
                if (!prop)
                        continue;

                for (j = 0; j < num_infos; j++) {
                        if (!strcmp(prop->name, info[j].name))
                                break;
                }

                /* We don't know/care about this property. */
                if (j == num_infos) {
#ifdef DEBUG
                        weston_log("DRM debug: unrecognized property %u '%s'\n",
                                   prop->prop_id, prop->name);
#endif
                        drmModeFreeProperty(prop);
                        continue;
                }

                if (info[j].num_enum_values == 0 &&
                    (prop->flags & DRM_MODE_PROP_ENUM)) {
                        weston_log("DRM: expected property %s to not be an"
                                   " enum, but it is; ignoring\n", prop->name);
                        drmModeFreeProperty(prop);
                        continue;
                }

                info[j].prop_id = props->props[i];
                info[j].flags = prop->flags;

                if (prop->flags & DRM_MODE_PROP_RANGE ||
                    prop->flags & DRM_MODE_PROP_SIGNED_RANGE) {
                        info[j].num_range_values = prop->count_values;
                        for (int i = 0; i < prop->count_values; i++)
                                info[j].range_values[i] = prop->values[i];
                }

                if (info[j].num_enum_values == 0) {
                        drmModeFreeProperty(prop);
                        continue;
                }

                if (!(prop->flags & DRM_MODE_PROP_ENUM)) {
                        weston_log("DRM: expected property %s to be an enum,"
                                   " but it is not; ignoring\n", prop->name);
                        drmModeFreeProperty(prop);
                        info[j].prop_id = 0;
                        continue;
                }

                for (k = 0; k < info[j].num_enum_values; k++) {
                        int l;

                        for (l = 0; l < prop->count_enums; l++) {
                                if (!strcmp(prop->enums[l].name,
                                            info[j].enum_values[k].name))
                                        break;
                        }

                        if (l == prop->count_enums)
                                continue;

                        info[j].enum_values[k].valid = true;
                        info[j].enum_values[k].value = prop->enums[l].value;
                }

                drmModeFreeProperty(prop);
        }

#ifdef DEBUG
        for (i = 0; i < num_infos; i++) {
                if (info[i].prop_id == 0)
                        weston_log("DRM warning: property '%s' missing\n",
                                   info[i].name);
        }
#endif
}

/**
 * Free DRM property information
 *
 * Frees all memory associated with a DRM property info array and zeroes
 * it out, leaving it usable for a further drm_property_info_populate() or
 * drm_property_info_free().
 *
 * @param info DRM property info array
 * @param num_props Number of entries in array to free
 */
void
drm_property_info_free(struct drm_property_info *info, int num_props)
{
        int i;

        for (i = 0; i < num_props; i++)
                free(info[i].enum_values);

        memset(info, 0, sizeof(*info) * num_props);
}
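
/*
 * Typical lifecycle, shown as a hedged sketch based on the CRTC handling
 * later in this file: populate the info array from the object's raw
 * properties, read the cached values, then free the array once the object is
 * no longer needed.
 *
 *      drmModeObjectProperties *props =
 *              drmModeObjectGetProperties(b->drm.fd, crtc_id,
 *                                         DRM_MODE_OBJECT_CRTC);
 *      drm_property_info_populate(b, crtc_props, infos, WDRM_CRTC__COUNT,
 *                                 props);
 *      active = drm_property_get_value(&infos[WDRM_CRTC_ACTIVE], props, 0);
 *      drmModeFreeObjectProperties(props);
 *      drm_property_info_free(infos, WDRM_CRTC__COUNT);
 */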

static inline uint32_t *
formats_ptr(struct drm_format_modifier_blob *blob)
{
        return (uint32_t *)(((char *)blob) + blob->formats_offset);
}

static inline struct drm_format_modifier *
modifiers_ptr(struct drm_format_modifier_blob *blob)
{
        return (struct drm_format_modifier *)
                (((char *)blob) + blob->modifiers_offset);
}
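
/*
 * For reference, the IN_FORMATS blob (struct drm_format_modifier_blob in the
 * kernel UAPI) is a header followed by two arrays in the same allocation,
 * which is why the helpers above only apply byte offsets:
 *
 *      formats_offset   -> uint32_t formats[count_formats]
 *      modifiers_offset -> struct drm_format_modifier modifiers[count_modifiers]
 *
 * Each drm_format_modifier carries a 64-bit 'formats' bitmask and an
 * 'offset'; bit N of the mask means that formats[offset + N] supports that
 * modifier. drm_plane_populate_formats() below decodes exactly this mapping.
 */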

/**
 * Populates the plane's formats array, using either the IN_FORMATS blob
 * property (if available), or the plane's format list if not.
 */
int
drm_plane_populate_formats(struct drm_plane *plane, const drmModePlane *kplane,
                           const drmModeObjectProperties *props)
{
        unsigned i;
        drmModePropertyBlobRes *blob;
        struct drm_format_modifier_blob *fmt_mod_blob;
        struct drm_format_modifier *blob_modifiers;
        uint32_t *blob_formats;
        uint32_t blob_id;

        blob_id = drm_property_get_value(&plane->props[WDRM_PLANE_IN_FORMATS],
                                         props,
                                         0);
        if (blob_id == 0)
                goto fallback;

        blob = drmModeGetPropertyBlob(plane->backend->drm.fd, blob_id);
        if (!blob)
                goto fallback;

        fmt_mod_blob = blob->data;
        blob_formats = formats_ptr(fmt_mod_blob);
        blob_modifiers = modifiers_ptr(fmt_mod_blob);

        if (plane->count_formats != fmt_mod_blob->count_formats) {
                weston_log("DRM backend: format count differs between "
                           "plane (%d) and IN_FORMATS (%d)\n",
                           plane->count_formats,
                           fmt_mod_blob->count_formats);
                weston_log("This represents a kernel bug; Weston is "
                           "unable to continue.\n");
                abort();
        }

        for (i = 0; i < fmt_mod_blob->count_formats; i++) {
                uint32_t count_modifiers = 0;
                uint64_t *modifiers = NULL;
                unsigned j;

                for (j = 0; j < fmt_mod_blob->count_modifiers; j++) {
                        struct drm_format_modifier *mod = &blob_modifiers[j];

                        if ((i < mod->offset) || (i > mod->offset + 63))
                                continue;
                        /* The mask is 64 bits wide, so shift a 64-bit one. */
                        if (!(mod->formats & (1ULL << (i - mod->offset))))
                                continue;

                        modifiers = realloc(modifiers,
                                            (count_modifiers + 1) *
                                            sizeof(modifiers[0]));
                        assert(modifiers);
                        modifiers[count_modifiers++] = mod->modifier;
                }

                if (count_modifiers == 0) {
                        modifiers = malloc(sizeof(*modifiers));
                        *modifiers = DRM_FORMAT_MOD_LINEAR;
                        count_modifiers = 1;
                }

                plane->formats[i].format = blob_formats[i];
                plane->formats[i].modifiers = modifiers;
                plane->formats[i].count_modifiers = count_modifiers;
        }

        drmModeFreePropertyBlob(blob);

        return 0;

fallback:
        /* No IN_FORMATS blob available, so just use the plain format list. */
        assert(plane->count_formats == kplane->count_formats);
        for (i = 0; i < kplane->count_formats; i++) {
                plane->formats[i].format = kplane->formats[i];
                plane->formats[i].modifiers = malloc(sizeof(uint64_t));
                plane->formats[i].modifiers[0] = DRM_FORMAT_MOD_LINEAR;
                plane->formats[i].count_modifiers = 1;
        }

        return 0;
}

void
drm_output_set_gamma(struct weston_output *output_base,
                     uint16_t size, uint16_t *r, uint16_t *g, uint16_t *b)
{
        int rc;
        struct drm_output *output = to_drm_output(output_base);
        struct drm_backend *backend =
                to_drm_backend(output->base.compositor);

        /* Ignore requests that don't match the hardware gamma table size. */
        if (output_base->gamma_size != size)
                return;

        rc = drmModeCrtcSetGamma(backend->drm.fd,
                                 output->crtc_id,
                                 size, r, g, b);
        if (rc)
                weston_log("set gamma failed: %s\n", strerror(errno));
}

/**
 * Mark an output state as current on the output, i.e. it has been
 * submitted to the kernel. The mode argument determines whether this
 * update will be applied synchronously (e.g. when calling drmModeSetCrtc),
 * or asynchronously (in which case we wait for events to complete).
 */
static void
drm_output_assign_state(struct drm_output_state *state,
                        enum drm_state_apply_mode mode)
{
        struct drm_output *output = state->output;
        struct drm_backend *b = to_drm_backend(output->base.compositor);
        struct drm_plane_state *plane_state;
        struct drm_head *head;

        assert(!output->state_last);

        if (mode == DRM_STATE_APPLY_ASYNC)
                output->state_last = output->state_cur;
        else
                drm_output_state_free(output->state_cur);

        wl_list_remove(&state->link);
        wl_list_init(&state->link);
        state->pending_state = NULL;

        output->state_cur = state;

        if (b->atomic_modeset && mode == DRM_STATE_APPLY_ASYNC) {
                drm_debug(b, "\t[CRTC:%u] setting pending flip\n", output->crtc_id);
                output->atomic_complete_pending = true;
        }

        if (b->atomic_modeset &&
            state->protection == WESTON_HDCP_DISABLE)
                wl_list_for_each(head, &output->base.head_list, base.output_link)
                        weston_head_set_content_protection_status(&head->base,
                                                                  WESTON_HDCP_DISABLE);

        /* Replace state_cur on each affected plane with the new state, being
         * careful to dispose of orphaned (but only orphaned) previous state.
         * If the previous state is not orphaned (still has an output_state
         * attached), it will be disposed of by freeing the output_state. */
        wl_list_for_each(plane_state, &state->plane_list, link) {
                struct drm_plane *plane = plane_state->plane;

                if (plane->state_cur && !plane->state_cur->output_state)
                        drm_plane_state_free(plane->state_cur, true);
                plane->state_cur = plane_state;

                if (mode != DRM_STATE_APPLY_ASYNC) {
                        plane_state->complete = true;
                        continue;
                }

                if (b->atomic_modeset)
                        continue;

                assert(plane->type != WDRM_PLANE_TYPE_OVERLAY);
                if (plane->type == WDRM_PLANE_TYPE_PRIMARY)
                        output->page_flip_pending = true;
        }
}

static void
drm_output_set_cursor(struct drm_output_state *output_state)
{
        struct drm_output *output = output_state->output;
        struct drm_backend *b = to_drm_backend(output->base.compositor);
        struct drm_plane *plane = output->cursor_plane;
        struct drm_plane_state *state;
        uint32_t handle;

        if (!plane)
                return;

        state = drm_output_state_get_existing_plane(output_state, plane);
        if (!state)
                return;

        if (!state->fb) {
                pixman_region32_fini(&plane->base.damage);
                pixman_region32_init(&plane->base.damage);
                drmModeSetCursor(b->drm.fd, output->crtc_id, 0, 0, 0);
                return;
        }

        assert(state->fb == output->gbm_cursor_fb[output->current_cursor]);
        assert(!plane->state_cur->output || plane->state_cur->output == output);

        handle = output->gbm_cursor_handle[output->current_cursor];
        if (plane->state_cur->fb != state->fb) {
                if (drmModeSetCursor(b->drm.fd, output->crtc_id, handle,
                                     b->cursor_width, b->cursor_height)) {
                        weston_log("failed to set cursor: %s\n",
                                   strerror(errno));
                        goto err;
                }
        }

        pixman_region32_fini(&plane->base.damage);
        pixman_region32_init(&plane->base.damage);

        if (drmModeMoveCursor(b->drm.fd, output->crtc_id,
                              state->dest_x, state->dest_y)) {
                weston_log("failed to move cursor: %s\n", strerror(errno));
                goto err;
        }

        return;

err:
        b->cursors_are_broken = true;
        drmModeSetCursor(b->drm.fd, output->crtc_id, 0, 0, 0);
}

static int
drm_output_apply_state_legacy(struct drm_output_state *state)
{
        struct drm_output *output = state->output;
        struct drm_backend *backend = to_drm_backend(output->base.compositor);
        struct drm_plane *scanout_plane = output->scanout_plane;
        struct drm_property_info *dpms_prop;
        struct drm_plane_state *scanout_state;
        struct drm_mode *mode;
        struct drm_head *head;
        const struct pixel_format_info *pinfo = NULL;
        uint32_t connectors[MAX_CLONED_CONNECTORS];
        int n_conn = 0;
        struct timespec now;
        int ret = 0;

        wl_list_for_each(head, &output->base.head_list, base.output_link) {
                assert(n_conn < MAX_CLONED_CONNECTORS);
                connectors[n_conn++] = head->connector_id;
        }

        /* If disable_planes is set then assign_planes() wasn't
         * called for this render, so we could still have a stale
         * cursor plane set up.
         */
        if (output->base.disable_planes) {
                output->cursor_view = NULL;
                if (output->cursor_plane) {
                        output->cursor_plane->base.x = INT32_MIN;
                        output->cursor_plane->base.y = INT32_MIN;
                }
        }

        if (state->dpms != WESTON_DPMS_ON) {
                if (output->cursor_plane) {
                        ret = drmModeSetCursor(backend->drm.fd, output->crtc_id,
                                               0, 0, 0);
                        if (ret)
                                weston_log("drmModeSetCursor failed disable: %s\n",
                                           strerror(errno));
                }

                ret = drmModeSetCrtc(backend->drm.fd, output->crtc_id, 0, 0, 0,
                                     NULL, 0, NULL);
                if (ret)
                        weston_log("drmModeSetCrtc failed disabling: %s\n",
                                   strerror(errno));

                drm_output_assign_state(state, DRM_STATE_APPLY_SYNC);
                weston_compositor_read_presentation_clock(output->base.compositor, &now);
                drm_output_update_complete(output,
                                           WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION,
                                           now.tv_sec, now.tv_nsec / 1000);

                return 0;
        }

        scanout_state =
                drm_output_state_get_existing_plane(state, scanout_plane);

        /* The legacy SetCrtc API doesn't allow us to do scaling, and the
         * legacy PageFlip API doesn't allow us to do clipping either. */
        assert(scanout_state->src_x == 0);
        assert(scanout_state->src_y == 0);
        assert(scanout_state->src_w ==
                (unsigned) (output->base.current_mode->width << 16));
        assert(scanout_state->src_h ==
                (unsigned) (output->base.current_mode->height << 16));
        assert(scanout_state->dest_x == 0);
        assert(scanout_state->dest_y == 0);
        assert(scanout_state->dest_w == scanout_state->src_w >> 16);
        assert(scanout_state->dest_h == scanout_state->src_h >> 16);
        /* The legacy SetCrtc API doesn't support fences */
        assert(scanout_state->in_fence_fd == -1);

        mode = to_drm_mode(output->base.current_mode);
        if (backend->state_invalid ||
            !scanout_plane->state_cur->fb ||
            scanout_plane->state_cur->fb->strides[0] !=
            scanout_state->fb->strides[0]) {

                ret = drmModeSetCrtc(backend->drm.fd, output->crtc_id,
                                     scanout_state->fb->fb_id,
                                     0, 0,
                                     connectors, n_conn,
                                     &mode->mode_info);
                if (ret) {
                        weston_log("set mode failed: %s\n", strerror(errno));
                        goto err;
                }
        }

        pinfo = scanout_state->fb->format;
        drm_debug(backend, "\t[CRTC:%u, PLANE:%u] FORMAT: %s\n",
                  output->crtc_id, scanout_state->plane->plane_id,
                  pinfo ? pinfo->drm_format_name : "UNKNOWN");

        if (drmModePageFlip(backend->drm.fd, output->crtc_id,
                            scanout_state->fb->fb_id,
                            DRM_MODE_PAGE_FLIP_EVENT, output) < 0) {
                weston_log("queueing pageflip failed: %s\n", strerror(errno));
                goto err;
        }

        assert(!output->page_flip_pending);

        if (output->pageflip_timer)
                wl_event_source_timer_update(output->pageflip_timer,
                                             backend->pageflip_timeout);

        drm_output_set_cursor(state);

        if (state->dpms != output->state_cur->dpms) {
                wl_list_for_each(head, &output->base.head_list, base.output_link) {
                        dpms_prop = &head->props_conn[WDRM_CONNECTOR_DPMS];
                        if (dpms_prop->prop_id == 0)
                                continue;

                        ret = drmModeConnectorSetProperty(backend->drm.fd,
                                                          head->connector_id,
                                                          dpms_prop->prop_id,
                                                          state->dpms);
                        if (ret) {
                                weston_log("DRM: DPMS: failed property set for %s\n",
                                           head->base.name);
                        }
                }
        }

        drm_output_assign_state(state, DRM_STATE_APPLY_ASYNC);

        return 0;

err:
        output->cursor_view = NULL;
        drm_output_state_free(state);
        return -1;
}

static int
crtc_add_prop(drmModeAtomicReq *req, struct drm_output *output,
              enum wdrm_crtc_property prop, uint64_t val)
{
        struct drm_property_info *info = &output->props_crtc[prop];
        int ret;

        if (info->prop_id == 0)
                return -1;

        ret = drmModeAtomicAddProperty(req, output->crtc_id, info->prop_id,
                                       val);
        drm_debug(output->backend, "\t\t\t[CRTC:%lu] %lu (%s) -> %llu (0x%llx)\n",
                  (unsigned long) output->crtc_id,
                  (unsigned long) info->prop_id, info->name,
                  (unsigned long long) val, (unsigned long long) val);
        return (ret <= 0) ? -1 : 0;
}

static int
connector_add_prop(drmModeAtomicReq *req, struct drm_head *head,
                   enum wdrm_connector_property prop, uint64_t val)
{
        struct drm_property_info *info = &head->props_conn[prop];
        int ret;

        if (info->prop_id == 0)
                return -1;

        ret = drmModeAtomicAddProperty(req, head->connector_id,
                                       info->prop_id, val);
        drm_debug(head->backend, "\t\t\t[CONN:%lu] %lu (%s) -> %llu (0x%llx)\n",
                  (unsigned long) head->connector_id,
                  (unsigned long) info->prop_id, info->name,
                  (unsigned long long) val, (unsigned long long) val);
        return (ret <= 0) ? -1 : 0;
}

static int
plane_add_prop(drmModeAtomicReq *req, struct drm_plane *plane,
               enum wdrm_plane_property prop, uint64_t val)
{
        struct drm_property_info *info = &plane->props[prop];
        int ret;

        if (info->prop_id == 0)
                return -1;

        ret = drmModeAtomicAddProperty(req, plane->plane_id, info->prop_id,
                                       val);
        drm_debug(plane->backend, "\t\t\t[PLANE:%lu] %lu (%s) -> %llu (0x%llx)\n",
                  (unsigned long) plane->plane_id,
                  (unsigned long) info->prop_id, info->name,
                  (unsigned long long) val, (unsigned long long) val);
        return (ret <= 0) ? -1 : 0;
}

static int
plane_add_damage(drmModeAtomicReq *req, struct drm_backend *backend,
                 struct drm_plane_state *plane_state)
{
        struct drm_plane *plane = plane_state->plane;
        struct drm_property_info *info =
                &plane->props[WDRM_PLANE_FB_DAMAGE_CLIPS];
        pixman_box32_t *rects;
        uint32_t blob_id;
        int n_rects;
        int ret;

        if (!pixman_region32_not_empty(&plane_state->damage))
                return 0;

        /*
         * If the plane doesn't support the FB damage blob property, the
         * kernel will perform a full plane update.
         */
        if (info->prop_id == 0)
                return 0;

        rects = pixman_region32_rectangles(&plane_state->damage, &n_rects);

        ret = drmModeCreatePropertyBlob(backend->drm.fd, rects,
                                        sizeof(*rects) * n_rects, &blob_id);
        if (ret != 0)
                return ret;

        ret = plane_add_prop(req, plane, WDRM_PLANE_FB_DAMAGE_CLIPS, blob_id);
        if (ret != 0)
                return ret;

        return 0;
}
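
/*
 * Note on the damage blob above: FB_DAMAGE_CLIPS expects an array of
 * struct drm_mode_rect, i.e. four int32 members (x1, y1, x2, y2) per
 * rectangle. pixman_box32_t has the same layout, which is why the pixman
 * rectangles can be handed to drmModeCreatePropertyBlob() without conversion.
 */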

static bool
drm_head_has_prop(struct drm_head *head,
                  enum wdrm_connector_property prop)
{
        if (head && head->props_conn[prop].prop_id != 0)
                return true;

        return false;
}

/*
 * This function converts protection requests from weston_hdcp_protection
 * to the corresponding DRM values. These values can be set in the
 * "Content Protection" & "HDCP Content Type" connector properties.
 */
static void
get_drm_protection_from_weston(enum weston_hdcp_protection weston_protection,
                               enum wdrm_content_protection_state *drm_protection,
                               enum wdrm_hdcp_content_type *drm_cp_type)
{

        switch (weston_protection) {
        case WESTON_HDCP_DISABLE:
                *drm_protection = WDRM_CONTENT_PROTECTION_UNDESIRED;
                *drm_cp_type = WDRM_HDCP_CONTENT_TYPE0;
                break;
        case WESTON_HDCP_ENABLE_TYPE_0:
                *drm_protection = WDRM_CONTENT_PROTECTION_DESIRED;
                *drm_cp_type = WDRM_HDCP_CONTENT_TYPE0;
                break;
        case WESTON_HDCP_ENABLE_TYPE_1:
                *drm_protection = WDRM_CONTENT_PROTECTION_DESIRED;
                *drm_cp_type = WDRM_HDCP_CONTENT_TYPE1;
                break;
        default:
                assert(0 && "bad weston_hdcp_protection");
        }
}

static void
drm_head_set_hdcp_property(struct drm_head *head,
                           enum weston_hdcp_protection protection,
                           drmModeAtomicReq *req)
{
        int ret;
        enum wdrm_content_protection_state drm_protection;
        enum wdrm_hdcp_content_type drm_cp_type;
        struct drm_property_enum_info *enum_info;
        uint64_t prop_val;

        get_drm_protection_from_weston(protection, &drm_protection,
                                       &drm_cp_type);

        if (!drm_head_has_prop(head, WDRM_CONNECTOR_CONTENT_PROTECTION))
                return;

        /*
         * The content-type property is not exposed on platforms that do not
         * support HDCP 2.2, so type-1 content cannot be supported there.
         * Type-0 content can still be supported as long as the
         * content-protection property is exposed.
         */
        if (!drm_head_has_prop(head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE) &&
            drm_cp_type != WDRM_HDCP_CONTENT_TYPE0)
                return;

        enum_info = head->props_conn[WDRM_CONNECTOR_CONTENT_PROTECTION].enum_values;
        prop_val = enum_info[drm_protection].value;
        ret = connector_add_prop(req, head, WDRM_CONNECTOR_CONTENT_PROTECTION,
                                 prop_val);
        assert(ret == 0);

        if (!drm_head_has_prop(head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE))
                return;

        enum_info = head->props_conn[WDRM_CONNECTOR_HDCP_CONTENT_TYPE].enum_values;
        prop_val = enum_info[drm_cp_type].value;
        ret = connector_add_prop(req, head, WDRM_CONNECTOR_HDCP_CONTENT_TYPE,
                                 prop_val);
        assert(ret == 0);
}

static int
drm_output_apply_state_atomic(struct drm_output_state *state,
                              drmModeAtomicReq *req,
                              uint32_t *flags)
{
        struct drm_output *output = state->output;
        struct drm_backend *b = to_drm_backend(output->base.compositor);
        struct drm_plane_state *plane_state;
        struct drm_mode *current_mode = to_drm_mode(output->base.current_mode);
        struct drm_head *head;
        int ret = 0;

        drm_debug(b, "\t\t[atomic] %s output %lu (%s) state\n",
                  (*flags & DRM_MODE_ATOMIC_TEST_ONLY) ? "testing" : "applying",
                  (unsigned long) output->base.id, output->base.name);

        if (state->dpms != output->state_cur->dpms) {
                drm_debug(b, "\t\t\t[atomic] DPMS state differs, modeset OK\n");
                *flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
        }

        if (state->dpms == WESTON_DPMS_ON) {
                ret = drm_mode_ensure_blob(b, current_mode);
                if (ret != 0)
                        return ret;

                ret |= crtc_add_prop(req, output, WDRM_CRTC_MODE_ID,
                                     current_mode->blob_id);
                ret |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 1);

                /* No need for the DPMS property, since it is implicit in
                 * routing and CRTC activity. */
                wl_list_for_each(head, &output->base.head_list, base.output_link) {
                        ret |= connector_add_prop(req, head, WDRM_CONNECTOR_CRTC_ID,
                                                  output->crtc_id);
                }
        } else {
                ret |= crtc_add_prop(req, output, WDRM_CRTC_MODE_ID, 0);
                ret |= crtc_add_prop(req, output, WDRM_CRTC_ACTIVE, 0);

                /* No need for the DPMS property, since it is implicit in
                 * routing and CRTC activity. */
                wl_list_for_each(head, &output->base.head_list, base.output_link)
                        ret |= connector_add_prop(req, head, WDRM_CONNECTOR_CRTC_ID, 0);
        }

        wl_list_for_each(head, &output->base.head_list, base.output_link)
                drm_head_set_hdcp_property(head, state->protection, req);

        if (ret != 0) {
                weston_log("couldn't set atomic CRTC/connector state\n");
                return ret;
        }

        wl_list_for_each(plane_state, &state->plane_list, link) {
                struct drm_plane *plane = plane_state->plane;
                const struct pixel_format_info *pinfo = NULL;

                ret |= plane_add_prop(req, plane, WDRM_PLANE_FB_ID,
                                      plane_state->fb ? plane_state->fb->fb_id : 0);
                ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID,
                                      plane_state->fb ? output->crtc_id : 0);
                ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_X,
                                      plane_state->src_x);
                ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_Y,
                                      plane_state->src_y);
                ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_W,
                                      plane_state->src_w);
                ret |= plane_add_prop(req, plane, WDRM_PLANE_SRC_H,
                                      plane_state->src_h);
                ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_X,
                                      plane_state->dest_x);
                ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_Y,
                                      plane_state->dest_y);
                ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_W,
                                      plane_state->dest_w);
                ret |= plane_add_prop(req, plane, WDRM_PLANE_CRTC_H,
                                      plane_state->dest_h);
                ret |= plane_add_damage(req, b, plane_state);

                if (plane_state->fb && plane_state->fb->format)
                        pinfo = plane_state->fb->format;

                drm_debug(plane->backend, "\t\t\t[PLANE:%lu] FORMAT: %s\n",
                          (unsigned long) plane->plane_id,
                          pinfo ? pinfo->drm_format_name : "UNKNOWN");

                if (plane_state->in_fence_fd >= 0) {
                        ret |= plane_add_prop(req, plane,
                                              WDRM_PLANE_IN_FENCE_FD,
                                              plane_state->in_fence_fd);
                }

                /* Note that 'invented' zpos values are exposed as immutable,
                 * so don't try to set them. */
                if (plane_state->zpos != DRM_PLANE_ZPOS_INVALID_PLANE &&
                    plane_state->plane->zpos_min != plane_state->plane->zpos_max)
                        ret |= plane_add_prop(req, plane,
                                              WDRM_PLANE_ZPOS,
                                              plane_state->zpos);

                if (ret != 0) {
                        weston_log("couldn't set plane state\n");
                        return ret;
                }
        }

        return 0;
}

/**
 * Helper function used only by drm_pending_state_apply, with the same
 * guarantees and constraints as that function.
 */
static int
drm_pending_state_apply_atomic(struct drm_pending_state *pending_state,
                               enum drm_state_apply_mode mode)
{
        struct drm_backend *b = pending_state->backend;
        struct drm_output_state *output_state, *tmp;
        struct drm_plane *plane;
        drmModeAtomicReq *req = drmModeAtomicAlloc();
        uint32_t flags;
        int ret = 0;

        if (!req)
                return -1;

        switch (mode) {
        case DRM_STATE_APPLY_SYNC:
                flags = 0;
                break;
        case DRM_STATE_APPLY_ASYNC:
                flags = DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK;
                break;
        case DRM_STATE_TEST_ONLY:
                flags = DRM_MODE_ATOMIC_TEST_ONLY;
                break;
        }

        if (b->state_invalid) {
                struct weston_head *head_base;
                struct drm_head *head;
                uint32_t *unused;
                int err;

                drm_debug(b, "\t\t[atomic] previous state invalid; "
                          "starting with fresh state\n");

                /* If we need to reset all our state (e.g. because we've
                 * just started, or just been VT-switched in), explicitly
                 * disable all the CRTCs and connectors we aren't using. */
                wl_list_for_each(head_base,
                                 &b->compositor->head_list, compositor_link) {
                        struct drm_property_info *info;

                        if (weston_head_is_enabled(head_base))
                                continue;

                        head = to_drm_head(head_base);

                        drm_debug(b, "\t\t[atomic] disabling inactive head %s\n",
                                  head_base->name);

                        info = &head->props_conn[WDRM_CONNECTOR_CRTC_ID];
                        err = drmModeAtomicAddProperty(req, head->connector_id,
                                                       info->prop_id, 0);
                        drm_debug(b, "\t\t\t[CONN:%lu] %lu (%s) -> 0\n",
                                  (unsigned long) head->connector_id,
                                  (unsigned long) info->prop_id,
                                  info->name);
                        if (err <= 0)
                                ret = -1;
                }

                wl_array_for_each(unused, &b->unused_crtcs) {
                        struct drm_property_info infos[WDRM_CRTC__COUNT];
                        struct drm_property_info *info;
                        drmModeObjectProperties *props;
                        uint64_t active;

                        memset(infos, 0, sizeof(infos));

                        /* We can't emit a disable on a CRTC that's already
                         * off, as the kernel will refuse to generate an event
                         * for an off->off state and fail the commit.
                         */
                        props = drmModeObjectGetProperties(b->drm.fd,
                                                           *unused,
                                                           DRM_MODE_OBJECT_CRTC);
                        if (!props) {
                                ret = -1;
                                continue;
                        }

                        drm_property_info_populate(b, crtc_props, infos,
                                                   WDRM_CRTC__COUNT,
                                                   props);

                        info = &infos[WDRM_CRTC_ACTIVE];
                        active = drm_property_get_value(info, props, 0);
                        drmModeFreeObjectProperties(props);
                        if (active == 0) {
                                drm_property_info_free(infos, WDRM_CRTC__COUNT);
                                continue;
                        }

                        drm_debug(b, "\t\t[atomic] disabling unused CRTC %lu\n",
                                  (unsigned long) *unused);

                        drm_debug(b, "\t\t\t[CRTC:%lu] %lu (%s) -> 0\n",
                                  (unsigned long) *unused,
                                  (unsigned long) info->prop_id, info->name);
                        err = drmModeAtomicAddProperty(req, *unused,
                                                       info->prop_id, 0);
                        if (err <= 0)
                                ret = -1;

                        info = &infos[WDRM_CRTC_MODE_ID];
                        drm_debug(b, "\t\t\t[CRTC:%lu] %lu (%s) -> 0\n",
                                  (unsigned long) *unused,
                                  (unsigned long) info->prop_id, info->name);
                        err = drmModeAtomicAddProperty(req, *unused,
                                                       info->prop_id, 0);
                        if (err <= 0)
                                ret = -1;

                        drm_property_info_free(infos, WDRM_CRTC__COUNT);
                }

                /* Disable all the planes; planes which are being used will
                 * override this state in the output-state application. */
                wl_list_for_each(plane, &b->plane_list, link) {
                        drm_debug(b, "\t\t[atomic] starting with plane %lu disabled\n",
                                  (unsigned long) plane->plane_id);
                        plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID, 0);
                        plane_add_prop(req, plane, WDRM_PLANE_FB_ID, 0);
                }

                flags |= DRM_MODE_ATOMIC_ALLOW_MODESET;
        }

        wl_list_for_each(output_state, &pending_state->output_list, link) {
                if (output_state->output->virtual)
                        continue;
                if (mode == DRM_STATE_APPLY_SYNC)
                        assert(output_state->dpms == WESTON_DPMS_OFF);
                ret |= drm_output_apply_state_atomic(output_state, req, &flags);
        }

        if (ret != 0) {
                weston_log("atomic: couldn't compile atomic state\n");
                goto out;
        }

        ret = drmModeAtomicCommit(b->drm.fd, req, flags, b);
        drm_debug(b, "[atomic] drmModeAtomicCommit\n");

        /* Test commits do not take ownership of the state; return
         * without freeing here. */
        if (mode == DRM_STATE_TEST_ONLY) {
                drmModeAtomicFree(req);
                return ret;
        }

        if (ret != 0) {
                weston_log("atomic: couldn't commit new state: %s\n",
                           strerror(errno));
                goto out;
        }

        wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
                              link)
                drm_output_assign_state(output_state, mode);

        b->state_invalid = false;

        assert(wl_list_empty(&pending_state->output_list));

out:
        drmModeAtomicFree(req);
        drm_pending_state_free(pending_state);
        return ret;
}

/**
 * Tests a pending state, to see if the kernel will accept the update as
 * constructed.
 *
 * Using atomic modesetting, the kernel performs the same checks as it would
 * on a real commit, returning success or failure without actually modifying
 * the running state. It does not return -EBUSY if there are pending updates
 * in flight, so states may be tested at any point; however, this means a
 * state which passed testing may still fail on a real commit if the timing
 * is not respected (e.g. committing before the previous commit has
 * completed).
 *
 * Without atomic modesetting, we have no way to check, so we optimistically
 * claim it will work.
 *
 * Unlike drm_pending_state_apply() and drm_pending_state_apply_sync(), this
 * function does _not_ take ownership of pending_state, nor does it clear
 * state_invalid.
 */
int
drm_pending_state_test(struct drm_pending_state *pending_state)
{
        struct drm_backend *b = pending_state->backend;

        if (b->atomic_modeset)
                return drm_pending_state_apply_atomic(pending_state,
                                                      DRM_STATE_TEST_ONLY);

        /* We have no way to test state before application on the legacy
         * modesetting API, so just claim it succeeded. */
        return 0;
}
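
/*
 * Caller-side sketch (names illustrative, not part of this file): a
 * speculative plane configuration is usually validated first and only then
 * committed, with drm_pending_state_apply() taking ownership of the state.
 *
 *      if (drm_pending_state_test(pending_state) != 0)
 *              // fall back to a less ambitious configuration
 *      ...
 *      drm_pending_state_apply(pending_state);
 */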

/**
 * Applies all of a pending_state asynchronously: the primary entry point for
 * applying KMS state to a device. Updates the state for all outputs in the
 * pending_state, as well as disabling any unclaimed outputs.
 *
 * Unconditionally takes ownership of pending_state, and clears state_invalid.
 */
int
drm_pending_state_apply(struct drm_pending_state *pending_state)
{
        struct drm_backend *b = pending_state->backend;
        struct drm_output_state *output_state, *tmp;
        uint32_t *unused;

        if (b->atomic_modeset)
                return drm_pending_state_apply_atomic(pending_state,
                                                      DRM_STATE_APPLY_ASYNC);

        if (b->state_invalid) {
                /* If we need to reset all our state (e.g. because we've
                 * just started, or just been VT-switched in), explicitly
                 * disable all the CRTCs we aren't using. This also disables
                 * all connectors on these CRTCs, so we don't need to do that
                 * separately with the pre-atomic API. */
                wl_array_for_each(unused, &b->unused_crtcs)
                        drmModeSetCrtc(b->drm.fd, *unused, 0, 0, 0, NULL, 0,
                                       NULL);
        }

        wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
                              link) {
                struct drm_output *output = output_state->output;
                int ret;

                if (output->virtual) {
                        drm_output_assign_state(output_state,
                                                DRM_STATE_APPLY_ASYNC);
                        continue;
                }

                ret = drm_output_apply_state_legacy(output_state);
                if (ret != 0) {
                        weston_log("Couldn't apply state for output %s\n",
                                   output->base.name);
                }
        }

        b->state_invalid = false;

        assert(wl_list_empty(&pending_state->output_list));

        drm_pending_state_free(pending_state);

        return 0;
}

/**
 * The synchronous version of drm_pending_state_apply. May only be used to
 * disable outputs. Does so synchronously: the request is guaranteed to have
 * completed on return, and the output will not be touched afterwards.
 *
 * Unconditionally takes ownership of pending_state, and clears state_invalid.
 */
int
drm_pending_state_apply_sync(struct drm_pending_state *pending_state)
{
        struct drm_backend *b = pending_state->backend;
        struct drm_output_state *output_state, *tmp;
        uint32_t *unused;

        if (b->atomic_modeset)
                return drm_pending_state_apply_atomic(pending_state,
                                                      DRM_STATE_APPLY_SYNC);

        if (b->state_invalid) {
                /* If we need to reset all our state (e.g. because we've
                 * just started, or just been VT-switched in), explicitly
                 * disable all the CRTCs we aren't using. This also disables
                 * all connectors on these CRTCs, so we don't need to do that
                 * separately with the pre-atomic API. */
                wl_array_for_each(unused, &b->unused_crtcs)
                        drmModeSetCrtc(b->drm.fd, *unused, 0, 0, 0, NULL, 0,
                                       NULL);
        }

        wl_list_for_each_safe(output_state, tmp, &pending_state->output_list,
                              link) {
                int ret;

                assert(output_state->dpms == WESTON_DPMS_OFF);
                ret = drm_output_apply_state_legacy(output_state);
                if (ret != 0) {
                        weston_log("Couldn't apply state for output %s\n",
                                   output_state->output->base.name);
                }
        }

        b->state_invalid = false;

        assert(wl_list_empty(&pending_state->output_list));

        drm_pending_state_free(pending_state);

        return 0;
}

void
drm_output_update_msc(struct drm_output *output, unsigned int seq)
{
        uint64_t msc_hi = output->base.msc >> 32;

        if (seq < (output->base.msc & 0xffffffff))
                msc_hi++;

        output->base.msc = (msc_hi << 32) + seq;
}
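
/*
 * Worked example: the KMS sequence number is only 32 bits wide. If the cached
 * msc is 0x1fffffffe (msc_hi == 1, low word 0xfffffffe) and the next event
 * reports seq == 1, the low word has wrapped: msc_hi becomes 2 and the new
 * msc is (2 << 32) + 1 == 0x200000001.
 */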

static void
page_flip_handler(int fd, unsigned int frame,
                  unsigned int sec, unsigned int usec, void *data)
{
        struct drm_output *output = data;
        struct drm_backend *b = to_drm_backend(output->base.compositor);
        uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC |
                         WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION |
                         WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK;

        drm_output_update_msc(output, frame);

        assert(!b->atomic_modeset);
        assert(output->page_flip_pending);
        output->page_flip_pending = false;

        drm_output_update_complete(output, flags, sec, usec);
}

static void
atomic_flip_handler(int fd, unsigned int frame, unsigned int sec,
                    unsigned int usec, unsigned int crtc_id, void *data)
{
        struct drm_backend *b = data;
        struct drm_output *output = drm_output_find_by_crtc(b, crtc_id);
        uint32_t flags = WP_PRESENTATION_FEEDBACK_KIND_VSYNC |
                         WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION |
                         WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK;

        /* During the initial modeset, we can disable CRTCs which we don't
         * actually handle during normal operation; this will give us events
         * for unknown outputs. Ignore them. */
        if (!output || !output->base.enabled)
                return;

        drm_output_update_msc(output, frame);

        drm_debug(b, "[atomic][CRTC:%u] flip processing started\n", crtc_id);
        assert(b->atomic_modeset);
        assert(output->atomic_complete_pending);
        output->atomic_complete_pending = false;

        drm_output_update_complete(output, flags, sec, usec);
        drm_debug(b, "[atomic][CRTC:%u] flip processing completed\n", crtc_id);
}

int
on_drm_input(int fd, uint32_t mask, void *data)
{
        struct drm_backend *b = data;
        drmEventContext evctx;

        memset(&evctx, 0, sizeof evctx);
        evctx.version = 3;
        if (b->atomic_modeset)
                evctx.page_flip_handler2 = atomic_flip_handler;
        else
                evctx.page_flip_handler = page_flip_handler;
        drmHandleEvent(fd, &evctx);

        return 1;
}
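
/*
 * on_drm_input() is written as a Wayland event-loop fd callback; the backend
 * is expected to register the DRM fd for reading elsewhere, roughly along the
 * lines of (a sketch; registration is not part of this file):
 *
 *      wl_event_loop_add_fd(loop, b->drm.fd, WL_EVENT_READABLE,
 *                           on_drm_input, b);
 */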

int
init_kms_caps(struct drm_backend *b)
{
        uint64_t cap;
        int ret;
        clockid_t clk_id;

        weston_log("using %s\n", b->drm.filename);

        ret = drmGetCap(b->drm.fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap);
        if (ret == 0 && cap == 1)
                clk_id = CLOCK_MONOTONIC;
        else
                clk_id = CLOCK_REALTIME;

        if (weston_compositor_set_presentation_clock(b->compositor, clk_id) < 0) {
                weston_log("Error: failed to set presentation clock %d.\n",
                           clk_id);
                return -1;
        }

        ret = drmGetCap(b->drm.fd, DRM_CAP_CURSOR_WIDTH, &cap);
        if (ret == 0)
                b->cursor_width = cap;
        else
                b->cursor_width = 64;

        ret = drmGetCap(b->drm.fd, DRM_CAP_CURSOR_HEIGHT, &cap);
        if (ret == 0)
                b->cursor_height = cap;
        else
                b->cursor_height = 64;

        if (!getenv("WESTON_DISABLE_UNIVERSAL_PLANES")) {
                ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
                b->universal_planes = (ret == 0);
        }
        weston_log("DRM: %s universal planes\n",
                   b->universal_planes ? "supports" : "does not support");

        if (b->universal_planes && !getenv("WESTON_DISABLE_ATOMIC")) {
                ret = drmGetCap(b->drm.fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &cap);
                if (ret != 0)
                        cap = 0;
                ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_ATOMIC, 1);
                b->atomic_modeset = ((ret == 0) && (cap == 1));
        }
        weston_log("DRM: %s atomic modesetting\n",
                   b->atomic_modeset ? "supports" : "does not support");

        ret = drmGetCap(b->drm.fd, DRM_CAP_ADDFB2_MODIFIERS, &cap);
        if (ret == 0)
                b->fb_modifiers = cap;
        else
                b->fb_modifiers = 0;

        /*
         * KMS support for hardware planes cannot properly synchronize
         * without nuclear page flip. Without nuclear/atomic, hw plane
         * and cursor plane updates would either tear or cause extra
         * waits for vblanks which means dropping the compositor framerate
         * to a fraction. For cursors, it's not so bad, so they are
         * enabled.
         */
        if (!b->atomic_modeset || getenv("WESTON_FORCE_RENDERER"))
                b->sprites_are_broken = true;

        ret = drmSetClientCap(b->drm.fd, DRM_CLIENT_CAP_ASPECT_RATIO, 1);
        b->aspect_ratio_supported = (ret == 0);
        weston_log("DRM: %s picture aspect ratio\n",
                   b->aspect_ratio_supported ? "supports" : "does not support");

        return 0;
}