blob: 0f631197a167ac9dfdab343008d5619f5464b42b [file] [log] [blame]
Daniel Stone7580b3c2019-06-18 11:16:53 +01001/*
2 * Copyright © 2008-2011 Kristian Høgsberg
3 * Copyright © 2011 Intel Corporation
4 * Copyright © 2017, 2018 Collabora, Ltd.
5 * Copyright © 2017, 2018 General Electric Company
6 * Copyright (c) 2018 DisplayLink (UK) Ltd.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining
9 * a copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sublicense, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial
18 * portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 * SOFTWARE.
28 */
29
30#include "config.h"
31
32#include <stdint.h>
33
34#include <xf86drm.h>
35#include <xf86drmMode.h>
Daniel Stone7580b3c2019-06-18 11:16:53 +010036
Daniel Stone7580b3c2019-06-18 11:16:53 +010037#include <libweston/libweston.h>
38#include <libweston/backend-drm.h>
39#include <libweston/pixel-formats.h>
40#include <libweston/linux-dmabuf.h>
41#include "shared/helpers.h"
Pekka Paalanen4b301fe2021-02-04 17:39:45 +020042#include "shared/weston-drm-fourcc.h"
Daniel Stone7580b3c2019-06-18 11:16:53 +010043#include "drm-internal.h"
44#include "linux-dmabuf.h"
45
leng.fang32af9fc2024-06-13 11:22:15 +080046#include "aml-weston/aml-backend.h"
xiaotao.weib5a8b812024-07-01 08:59:57 +000047#include <limits.h> /* CHAR_BIT */
48
/* Fallback for libdrm headers too old to define the ARM modifier helpers. */
#ifndef DRM_FORMAT_MOD_ARM_TYPE_MASK
#define DRM_FORMAT_MOD_ARM_TYPE_MASK 0xf
#endif

/* Field extractors for DRM format modifiers as laid out in <drm_fourcc.h>:
 * the vendor code lives in the top 8 bits (56-63); for ARM modifiers the
 * type code lives in the next 4 bits (52-55). */
#define fourcc_mod_code_get_vendor(val) ((val) >> 56)
#define fourcc_mod_code_get_type(val) (((val) >> 52) & DRM_FORMAT_MOD_ARM_TYPE_MASK)
leng.fang32af9fc2024-06-13 11:22:15 +080055
/* Common teardown path shared by every drm_fb type.
 *
 * NOTE(review): this delegates to the Amlogic backend hook from
 * aml-weston/aml-backend.h; presumably it removes fb->fb_id from KMS and
 * frees @fb, matching upstream drm_fb_destroy() — confirm against that
 * backend's implementation. */
static void
drm_fb_destroy(struct drm_fb *fb)
{
	drm_fb_destroy_aml(fb);
}
61
62static void
63drm_fb_destroy_dumb(struct drm_fb *fb)
64{
65 struct drm_mode_destroy_dumb destroy_arg;
66
67 assert(fb->type == BUFFER_PIXMAN_DUMB);
68
69 if (fb->map && fb->size > 0)
70 munmap(fb->map, fb->size);
71
72 memset(&destroy_arg, 0, sizeof(destroy_arg));
73 destroy_arg.handle = fb->handles[0];
74 drmIoctl(fb->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_arg);
75
76 drm_fb_destroy(fb);
77}
78
Daniel Stone7580b3c2019-06-18 11:16:53 +010079static int
80drm_fb_addfb(struct drm_backend *b, struct drm_fb *fb)
81{
82 int ret = -EINVAL;
Daniel Stone7580b3c2019-06-18 11:16:53 +010083 uint64_t mods[4] = { };
84 size_t i;
Daniel Stone7580b3c2019-06-18 11:16:53 +010085
86 /* If we have a modifier set, we must only use the WithModifiers
87 * entrypoint; we cannot import it through legacy ioctls. */
88 if (b->fb_modifiers && fb->modifier != DRM_FORMAT_MOD_INVALID) {
89 /* KMS demands that if a modifier is set, it must be the same
90 * for all planes. */
Daniel Stone7580b3c2019-06-18 11:16:53 +010091 for (i = 0; i < ARRAY_LENGTH(mods) && fb->handles[i]; i++)
92 mods[i] = fb->modifier;
93 ret = drmModeAddFB2WithModifiers(fb->fd, fb->width, fb->height,
94 fb->format->format,
95 fb->handles, fb->strides,
96 fb->offsets, mods, &fb->fb_id,
97 DRM_MODE_FB_MODIFIERS);
Daniel Stone7580b3c2019-06-18 11:16:53 +010098 return ret;
99 }
100
101 ret = drmModeAddFB2(fb->fd, fb->width, fb->height, fb->format->format,
102 fb->handles, fb->strides, fb->offsets, &fb->fb_id,
103 0);
104 if (ret == 0)
105 return 0;
106
107 /* Legacy AddFB can't always infer the format from depth/bpp alone, so
108 * check if our format is one of the lucky ones. */
109 if (!fb->format->depth || !fb->format->bpp)
110 return ret;
111
112 /* Cannot fall back to AddFB for multi-planar formats either. */
113 if (fb->handles[1] || fb->handles[2] || fb->handles[3])
114 return ret;
115
116 ret = drmModeAddFB(fb->fd, fb->width, fb->height,
117 fb->format->depth, fb->format->bpp,
118 fb->strides[0], fb->handles[0], &fb->fb_id);
119 return ret;
120}
121
122struct drm_fb *
123drm_fb_create_dumb(struct drm_backend *b, int width, int height,
124 uint32_t format)
125{
126 struct drm_fb *fb;
127 int ret;
128
129 struct drm_mode_create_dumb create_arg;
130 struct drm_mode_destroy_dumb destroy_arg;
131 struct drm_mode_map_dumb map_arg;
132
133 fb = zalloc(sizeof *fb);
134 if (!fb)
135 return NULL;
136 fb->refcnt = 1;
137
138 fb->format = pixel_format_get_info(format);
139 if (!fb->format) {
140 weston_log("failed to look up format 0x%lx\n",
141 (unsigned long) format);
142 goto err_fb;
143 }
144
145 if (!fb->format->depth || !fb->format->bpp) {
146 weston_log("format 0x%lx is not compatible with dumb buffers\n",
147 (unsigned long) format);
148 goto err_fb;
149 }
150
151 memset(&create_arg, 0, sizeof create_arg);
152 create_arg.bpp = fb->format->bpp;
153 create_arg.width = width;
154 create_arg.height = height;
155
156 ret = drmIoctl(b->drm.fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_arg);
157 if (ret)
158 goto err_fb;
159
160 fb->type = BUFFER_PIXMAN_DUMB;
161 fb->modifier = DRM_FORMAT_MOD_INVALID;
162 fb->handles[0] = create_arg.handle;
163 fb->strides[0] = create_arg.pitch;
164 fb->num_planes = 1;
165 fb->size = create_arg.size;
166 fb->width = width;
167 fb->height = height;
168 fb->fd = b->drm.fd;
169
170 if (drm_fb_addfb(b, fb) != 0) {
171 weston_log("failed to create kms fb: %s\n", strerror(errno));
172 goto err_bo;
173 }
174
175 memset(&map_arg, 0, sizeof map_arg);
176 map_arg.handle = fb->handles[0];
177 ret = drmIoctl(fb->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_arg);
178 if (ret)
179 goto err_add_fb;
180
181 fb->map = mmap(NULL, fb->size, PROT_WRITE,
182 MAP_SHARED, b->drm.fd, map_arg.offset);
183 if (fb->map == MAP_FAILED)
184 goto err_add_fb;
185
186 return fb;
187
188err_add_fb:
189 drmModeRmFB(b->drm.fd, fb->fb_id);
190err_bo:
191 memset(&destroy_arg, 0, sizeof(destroy_arg));
192 destroy_arg.handle = create_arg.handle;
193 drmIoctl(b->drm.fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_arg);
194err_fb:
195 free(fb);
196 return NULL;
197}
198
199struct drm_fb *
200drm_fb_ref(struct drm_fb *fb)
201{
202 fb->refcnt++;
203 return fb;
204}
205
Stefan Agnerccf24072019-07-09 22:02:00 +0200206#ifdef BUILD_DRM_GBM
207static void
208drm_fb_destroy_gbm(struct gbm_bo *bo, void *data)
209{
210 struct drm_fb *fb = data;
211
212 assert(fb->type == BUFFER_GBM_SURFACE || fb->type == BUFFER_CLIENT ||
213 fb->type == BUFFER_CURSOR);
214 drm_fb_destroy(fb);
215}
216
Daniel Stone7580b3c2019-06-18 11:16:53 +0100217static void
218drm_fb_destroy_dmabuf(struct drm_fb *fb)
219{
220 /* We deliberately do not close the GEM handles here; GBM manages
221 * their lifetime through the BO. */
222 if (fb->bo)
223 gbm_bo_destroy(fb->bo);
224 drm_fb_destroy(fb);
225}
226
/* Import a linux-dmabuf client buffer as a KMS framebuffer.
 *
 * The dmabuf is imported through GBM (GBM_BO_IMPORT_FD_MODIFIER) and then
 * registered with KMS via drm_fb_addfb().  Buffers without an explicit
 * modifier, or with any dmabuf attribute flags set, are rejected.
 *
 * @dmabuf: the client dmabuf to import.
 * @backend: DRM backend owning the GBM and DRM devices.
 * @is_opaque: if true, substitute the opaque (X-channel) format variant.
 * @try_view_on_plane_failure_reasons: optional (may be NULL); on failure,
 *     FAILURE_REASONS_* bits are ORed in for plane-placement debugging.
 *
 * Returns a new drm_fb with refcnt 1, or NULL on failure. */
static struct drm_fb *
drm_fb_get_from_dmabuf(struct linux_dmabuf_buffer *dmabuf,
		       struct drm_backend *backend, bool is_opaque,
		       uint32_t *try_view_on_plane_failure_reasons)
{
#ifndef HAVE_GBM_FD_IMPORT
	/* Importing a buffer to KMS requires explicit modifiers, so
	 * we can't continue with the legacy GBM_BO_IMPORT_FD instead
	 * of GBM_BO_IMPORT_FD_MODIFIER. */
	return NULL;
#else
	struct drm_fb *fb;
	int i;
	struct gbm_import_fd_modifier_data import_mod = {
		.width = dmabuf->attributes.width,
		.height = dmabuf->attributes.height,
		.format = dmabuf->attributes.format,
		.num_fds = dmabuf->attributes.n_planes,
		.modifier = dmabuf->attributes.modifier[0],
	};

	/* We should not import to KMS a buffer that has been allocated using no
	 * modifiers. Usually drivers use linear layouts to allocate with no
	 * modifiers, but this is not a rule. The driver could use, for
	 * instance, a tiling layout under the hood - and both Weston and the
	 * KMS driver can't know. So giving the buffer to KMS is not safe, as
	 * not knowing its layout can result in garbage being displayed. In
	 * short, importing a buffer to KMS requires explicit modifiers. */
	if (dmabuf->attributes.modifier[0] == DRM_FORMAT_MOD_INVALID) {
		if (try_view_on_plane_failure_reasons)
			*try_view_on_plane_failure_reasons |=
				FAILURE_REASONS_DMABUF_MODIFIER_INVALID;
		return NULL;
	}

	/* XXX: TODO:
	 *
	 * Currently the buffer is rejected if any dmabuf attribute
	 * flag is set. This keeps us from passing an inverted /
	 * interlaced / bottom-first buffer (or any other type that may
	 * be added in the future) through to an overlay. Ultimately,
	 * these types of buffers should be handled through buffer
	 * transforms and not as spot-checks requiring specific
	 * knowledge. */
	if (dmabuf->attributes.flags)
		return NULL;

	fb = zalloc(sizeof *fb);
	if (fb == NULL)
		return NULL;

	fb->refcnt = 1;
	fb->type = BUFFER_DMABUF;

	/* Per-plane fds/strides/offsets are copied wholesale into the GBM
	 * import descriptor. */
	ARRAY_COPY(import_mod.fds, dmabuf->attributes.fd);
	ARRAY_COPY(import_mod.strides, dmabuf->attributes.stride);
	ARRAY_COPY(import_mod.offsets, dmabuf->attributes.offset);

	fb->bo = gbm_bo_import(backend->gbm, GBM_BO_IMPORT_FD_MODIFIER,
			       &import_mod, GBM_BO_USE_SCANOUT);
	if (!fb->bo)
		goto err_free;

	fb->width = dmabuf->attributes.width;
	fb->height = dmabuf->attributes.height;
	fb->modifier = dmabuf->attributes.modifier[0];
	fb->size = 0;
	fb->fd = backend->drm.fd;

	ARRAY_COPY(fb->strides, dmabuf->attributes.stride);
	ARRAY_COPY(fb->offsets, dmabuf->attributes.offset);

	fb->format = pixel_format_get_info(dmabuf->attributes.format);
	if (!fb->format) {
		weston_log("couldn't look up format info for 0x%lx\n",
			   (unsigned long) dmabuf->attributes.format);
		goto err_free;
	}

	/* An opaque view can be scanned out using the X-channel variant of
	 * an alpha format. */
	if (is_opaque)
		fb->format = pixel_format_get_opaque_substitute(fb->format);

	if (backend->min_width > fb->width ||
	    fb->width > backend->max_width ||
	    backend->min_height > fb->height ||
	    fb->height > backend->max_height) {
		weston_log("bo geometry out of bounds\n");
		goto err_free;
	}

	/* Collect the per-plane GEM handles for AddFB2; a -1 handle means
	 * GBM could not produce one for that plane. */
	fb->num_planes = dmabuf->attributes.n_planes;
	for (i = 0; i < dmabuf->attributes.n_planes; i++) {
		union gbm_bo_handle handle;

		handle = gbm_bo_get_handle_for_plane(fb->bo, i);
		if (handle.s32 == -1)
			goto err_free;
		fb->handles[i] = handle.u32;
	}

	if (drm_fb_addfb(backend, fb) != 0) {
		if (try_view_on_plane_failure_reasons)
			*try_view_on_plane_failure_reasons |=
				FAILURE_REASONS_ADD_FB_FAILED;
		goto err_free;
	}

	return fb;

err_free:
	drm_fb_destroy_dmabuf(fb);
	return NULL;
#endif
}
341
xiaotao.weib5a8b812024-07-01 08:59:57 +0000342/* Returns true if the modifier describes an AFRC format. */
343static bool drm_fourcc_modifier_is_afrc(uint64_t modifier)
344{
345 uint32_t vendor = fourcc_mod_code_get_vendor(modifier);
346 uint32_t type = fourcc_mod_code_get_type(modifier);
347 return DRM_FORMAT_MOD_VENDOR_ARM == vendor && DRM_FORMAT_MOD_ARM_TYPE_AFRC == type;
348}
349
/* Wrap a GBM BO in a drm_fb and register it with KMS.
 *
 * If the BO already carries a drm_fb as user data, that cached fb is
 * re-referenced and returned.  Otherwise a new drm_fb is built from the
 * BO's geometry/format (using per-plane modifier data when the compositor
 * enables GBM modifiers), registered via drm_fb_addfb(), and attached to
 * the BO so it is destroyed together with it.
 *
 * @bo: the GBM buffer object; on success the returned fb is attached to
 *     it, on failure the caller keeps ownership of @bo.
 * @backend: DRM backend providing device limits and the DRM fd.
 * @is_opaque: if true, substitute the opaque (X-channel) format variant.
 * @type: the drm_fb buffer type to record (surface/client/cursor).
 *
 * Returns a referenced drm_fb, or NULL on failure. */
struct drm_fb *
drm_fb_get_from_bo(struct gbm_bo *bo, struct drm_backend *backend,
		   bool is_opaque, enum drm_fb_type type)
{
	struct drm_fb *fb = gbm_bo_get_user_data(bo);
#ifdef HAVE_GBM_MODIFIERS
	int i;
#endif

	/* Cached import: hand out another reference. */
	if (fb) {
		assert(fb->type == type);
		return drm_fb_ref(fb);
	}

	fb = zalloc(sizeof *fb);
	if (fb == NULL)
		return NULL;

	fb->type = type;
	fb->refcnt = 1;
	fb->bo = bo;
	fb->fd = backend->drm.fd;

	fb->width = gbm_bo_get_width(bo);
	fb->height = gbm_bo_get_height(bo);
	fb->format = pixel_format_get_info(gbm_bo_get_format(bo));
	fb->size = 0;

#ifdef HAVE_GBM_MODIFIERS
	if (backend->compositor->use_gbm_modifiers) {
		fb->modifier = gbm_bo_get_modifier(bo);
		fb->num_planes = gbm_bo_get_plane_count(bo);
		for (i = 0; i < fb->num_planes; i++) {
			/* Vendor quirk: for ARM AFRC layouts the stride
			 * reported by GBM is not what KMS expects, so it is
			 * recomputed as width * bytes-per-pixel.
			 * NOTE(review): applied to every plane identically —
			 * assumed single-plane in practice; confirm. */
			if (drm_fourcc_modifier_is_afrc(fb->modifier)) {
				fb->strides[i] = fb->width * gbm_bo_get_bpp(bo) / CHAR_BIT;
			} else {
				fb->strides[i] = gbm_bo_get_stride_for_plane(bo, i);
			}
			fb->handles[i] = gbm_bo_get_handle_for_plane(bo, i).u32;
			fb->offsets[i] = gbm_bo_get_offset(bo, i);
		}
	} else {
		fb->num_planes = 1;
		fb->strides[0] = gbm_bo_get_stride(bo);
		fb->handles[0] = gbm_bo_get_handle(bo).u32;
		fb->modifier = DRM_FORMAT_MOD_INVALID;
	}
#else
	fb->num_planes = 1;
	fb->strides[0] = gbm_bo_get_stride(bo);
	fb->handles[0] = gbm_bo_get_handle(bo).u32;
	fb->modifier = DRM_FORMAT_MOD_INVALID;
#endif

	if (!fb->format) {
		weston_log("couldn't look up format 0x%lx\n",
			   (unsigned long) gbm_bo_get_format(bo));
		goto err_free;
	}

	/* We can scanout an ARGB buffer if the surface's opaque region covers
	 * the whole output, but we have to use XRGB as the KMS format code. */
	if (is_opaque)
		fb->format = pixel_format_get_opaque_substitute(fb->format);

	if (backend->min_width > fb->width ||
	    fb->width > backend->max_width ||
	    backend->min_height > fb->height ||
	    fb->height > backend->max_height) {
		weston_log("bo geometry out of bounds\n");
		goto err_free;
	}

	if (drm_fb_addfb(backend, fb) != 0) {
		if (type == BUFFER_GBM_SURFACE)
			weston_log("failed to create kms fb: %s\n",
				   strerror(errno));
		goto err_free;
	}

	/* Cache the fb on the BO; drm_fb_destroy_gbm() releases it when the
	 * BO goes away. */
	gbm_bo_set_user_data(bo, fb, drm_fb_destroy_gbm);

	return fb;

err_free:
	free(fb);
	return NULL;
}
Stefan Agnerccf24072019-07-09 22:02:00 +0200438#endif
Daniel Stone7580b3c2019-06-18 11:16:53 +0100439
/* Drop one reference on @fb (NULL is a no-op) and destroy it via the
 * appropriate type-specific path once the count reaches zero. */
void
drm_fb_unref(struct drm_fb *fb)
{
	if (!fb)
		return;

	assert(fb->refcnt > 0);
	if (--fb->refcnt > 0)
		return;

	switch (fb->type) {
	case BUFFER_PIXMAN_DUMB:
		drm_fb_destroy_dumb(fb);
		break;
#ifdef BUILD_DRM_GBM
	case BUFFER_CURSOR:
	case BUFFER_CLIENT:
		/* Client/cursor BOs are owned by this fb; destroying the BO
		 * triggers drm_fb_destroy_gbm() via its user data. */
		gbm_bo_destroy(fb->bo);
		break;
	case BUFFER_GBM_SURFACE:
		/* Surface BOs go back to their gbm_surface instead of being
		 * destroyed. */
		gbm_surface_release_buffer(fb->gbm_surface, fb->bo);
		break;
	case BUFFER_DMABUF:
		drm_fb_destroy_dmabuf(fb);
		break;
#endif
	default:
		/* Unknown buffer type: must not happen. */
		assert(NULL);
		break;
	}
}
471
Stefan Agnerccf24072019-07-09 22:02:00 +0200472#ifdef BUILD_DRM_GBM
Marius Vlad81bada52019-11-11 00:27:17 +0200473bool
474drm_can_scanout_dmabuf(struct weston_compositor *ec,
475 struct linux_dmabuf_buffer *dmabuf)
476{
477 struct drm_fb *fb;
478 struct drm_backend *b = to_drm_backend(ec);
479 bool ret = false;
480
Leandro Ribeiro0a7034c2021-09-13 14:52:53 -0300481 fb = drm_fb_get_from_dmabuf(dmabuf, b, true, NULL);
Marius Vlad81bada52019-11-11 00:27:17 +0200482 if (fb)
483 ret = true;
484
485 drm_fb_unref(fb);
486 drm_debug(b, "[dmabuf] dmabuf %p, import test %s\n", dmabuf,
487 ret ? "succeeded" : "failed");
488 return ret;
489}
490
Daniel Stone57d609a2021-11-16 18:56:09 +0000491static bool
492drm_fb_compatible_with_plane(struct drm_fb *fb, struct drm_plane *plane)
493{
494 struct drm_backend *b = plane->backend;
495 struct weston_drm_format *fmt;
496
497 /* Check whether the format is supported */
498 fmt = weston_drm_format_array_find_format(&plane->formats,
499 fb->format->format);
500 if (fmt) {
501 /* We never try to promote a dmabuf with DRM_FORMAT_MOD_INVALID
502 * to a KMS plane (see drm_fb_get_from_dmabuf() for more details).
503 * So if fb->modifier == DRM_FORMAT_MOD_INVALID, we are sure
504 * that this is for the legacy GBM import path, in which a
505 * wl_drm is being used for scanout. Mesa is the only user we
506 * care in this case (even though recent versions are also using
507 * dmabufs), and it should know better what works or not. */
508 if (fb->modifier == DRM_FORMAT_MOD_INVALID)
509 return true;
510
511 if (weston_drm_format_has_modifier(fmt, fb->modifier))
512 return true;
513 }
514
515 drm_debug(b, "\t\t\t\t[%s] not placing view on %s: "
516 "no free %s planes matching format %s (0x%lx) "
517 "modifier 0x%llx\n",
518 drm_output_get_plane_type_name(plane),
519 drm_output_get_plane_type_name(plane),
520 drm_output_get_plane_type_name(plane),
521 fb->format->drm_format_name,
522 (unsigned long) fb->format->format,
523 (unsigned long long) fb->modifier);
524
525 return false;
526}
527
Daniel Stone7d27df42021-11-18 16:01:03 +0000528static void
529drm_fb_handle_buffer_destroy(struct wl_listener *listener, void *data)
530{
531 struct drm_buffer_fb *buf_fb =
532 container_of(listener, struct drm_buffer_fb, buffer_destroy_listener);
533
534 if (buf_fb->fb) {
535 assert(buf_fb->fb->type == BUFFER_CLIENT ||
536 buf_fb->fb->type == BUFFER_DMABUF);
537 drm_fb_unref(buf_fb->fb);
538 }
539
540 free(buf_fb);
541}
542
Daniel Stone7580b3c2019-06-18 11:16:53 +0100543struct drm_fb *
Leandro Ribeiro0a7034c2021-09-13 14:52:53 -0300544drm_fb_get_from_view(struct drm_output_state *state, struct weston_view *ev,
545 uint32_t *try_view_on_plane_failure_reasons)
Daniel Stone7580b3c2019-06-18 11:16:53 +0100546{
547 struct drm_output *output = state->output;
548 struct drm_backend *b = to_drm_backend(output->base.compositor);
549 struct weston_buffer *buffer = ev->surface->buffer_ref.buffer;
Daniel Stone7d27df42021-11-18 16:01:03 +0000550 struct drm_buffer_fb *buf_fb;
Daniel Stone7580b3c2019-06-18 11:16:53 +0100551 bool is_opaque = weston_view_is_opaque(ev, &ev->transform.boundingbox);
552 struct linux_dmabuf_buffer *dmabuf;
553 struct drm_fb *fb;
Daniel Stone57d609a2021-11-16 18:56:09 +0000554 struct drm_plane *plane;
Daniel Stone7580b3c2019-06-18 11:16:53 +0100555
556 if (ev->alpha != 1.0f)
557 return NULL;
558
limin.tianc616dad2024-07-15 11:35:38 +0000559 //if (!drm_view_transform_supported(ev, &output->base))
560 // return NULL;
Daniel Stone7580b3c2019-06-18 11:16:53 +0100561
Daniel Stoned32dfcf2019-08-19 16:53:40 +0100562 if (ev->surface->protection_mode == WESTON_SURFACE_PROTECTION_MODE_ENFORCED &&
563 ev->surface->desired_protection > output->base.current_protection)
564 return NULL;
565
Daniel Stone7580b3c2019-06-18 11:16:53 +0100566 if (!buffer)
567 return NULL;
568
Daniel Stone7d27df42021-11-18 16:01:03 +0000569 if (buffer->backend_private) {
570 buf_fb = buffer->backend_private;
571 *try_view_on_plane_failure_reasons |= buf_fb->failure_reasons;
572 return buf_fb->fb ? drm_fb_ref(buf_fb->fb) : NULL;
573 }
574
575 buf_fb = zalloc(sizeof(*buf_fb));
576 buffer->backend_private = buf_fb;
577 buf_fb->buffer_destroy_listener.notify = drm_fb_handle_buffer_destroy;
578 wl_signal_add(&buffer->destroy_signal, &buf_fb->buffer_destroy_listener);
579
Daniel Stone7580b3c2019-06-18 11:16:53 +0100580 if (wl_shm_buffer_get(buffer->resource))
Daniel Stone7d27df42021-11-18 16:01:03 +0000581 goto unsuitable;
Daniel Stone7580b3c2019-06-18 11:16:53 +0100582
583 /* GBM is used for dmabuf import as well as from client wl_buffer. */
584 if (!b->gbm)
Daniel Stone7d27df42021-11-18 16:01:03 +0000585 goto unsuitable;
Daniel Stone7580b3c2019-06-18 11:16:53 +0100586
587 dmabuf = linux_dmabuf_buffer_get(buffer->resource);
588 if (dmabuf) {
Leandro Ribeiro0a7034c2021-09-13 14:52:53 -0300589 fb = drm_fb_get_from_dmabuf(dmabuf, b, is_opaque,
Daniel Stone7d27df42021-11-18 16:01:03 +0000590 &buf_fb->failure_reasons);
Daniel Stone7580b3c2019-06-18 11:16:53 +0100591 if (!fb)
Daniel Stone7d27df42021-11-18 16:01:03 +0000592 goto unsuitable;
Daniel Stone7580b3c2019-06-18 11:16:53 +0100593 } else {
594 struct gbm_bo *bo;
595
596 bo = gbm_bo_import(b->gbm, GBM_BO_IMPORT_WL_BUFFER,
597 buffer->resource, GBM_BO_USE_SCANOUT);
598 if (!bo)
Daniel Stone7d27df42021-11-18 16:01:03 +0000599 goto unsuitable;
Daniel Stone7580b3c2019-06-18 11:16:53 +0100600
601 fb = drm_fb_get_from_bo(bo, b, is_opaque, BUFFER_CLIENT);
602 if (!fb) {
603 gbm_bo_destroy(bo);
Daniel Stone7d27df42021-11-18 16:01:03 +0000604 goto unsuitable;
Daniel Stone7580b3c2019-06-18 11:16:53 +0100605 }
606 }
607
Daniel Stone57d609a2021-11-16 18:56:09 +0000608 /* Check if this buffer can ever go on any planes. If it can't, we have
609 * no reason to ever have a drm_fb, so we fail it here. */
610 wl_list_for_each(plane, &b->plane_list, link) {
611 if (drm_fb_compatible_with_plane(fb, plane))
612 fb->plane_mask |= (1 << plane->plane_idx);
613 }
614 if (fb->plane_mask == 0) {
615 drm_fb_unref(fb);
616 buf_fb->failure_reasons |= FAILURE_REASONS_FB_FORMAT_INCOMPATIBLE;
617 goto unsuitable;
618 }
619
Daniel Stone7d27df42021-11-18 16:01:03 +0000620 /* The caller holds its own ref to the drm_fb, so when creating a new
621 * drm_fb we take an additional ref for the weston_buffer's cache. */
622 buf_fb->fb = drm_fb_ref(fb);
623
Daniel Stone7580b3c2019-06-18 11:16:53 +0100624 drm_debug(b, "\t\t\t[view] view %p format: %s\n",
625 ev, fb->format->drm_format_name);
Daniel Stone7580b3c2019-06-18 11:16:53 +0100626 return fb;
Daniel Stone7d27df42021-11-18 16:01:03 +0000627
628unsuitable:
629 *try_view_on_plane_failure_reasons |= buf_fb->failure_reasons;
630 return NULL;
Daniel Stone7580b3c2019-06-18 11:16:53 +0100631}
Stefan Agnerccf24072019-07-09 22:02:00 +0200632#endif