/* GStreamer
 * Copyright (C) 2022 <xuesong.jiang@amlogic.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
 * Boston, MA 02110-1335, USA.
 */

#include "config.h"

#include "ext/videodev2.h"

#include "gstamlv4l2object.h"
#include "gstamlv4l2allocator.h"

#include <gst/allocators/gstdmabuf.h>

#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>

#define GST_AML_V4L2_MEMORY_TYPE "V4l2Memory"

#define gst_aml_v4l2_allocator_parent_class parent_class
G_DEFINE_TYPE(GstAmlV4l2Allocator, gst_aml_v4l2_allocator, GST_TYPE_ALLOCATOR);

GST_DEBUG_CATEGORY_STATIC(amlv4l2allocator_debug);
#define GST_CAT_DEFAULT amlv4l2allocator_debug

#define UNSET_QUEUED(buffer) \
    ((buffer).flags &= ~(V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))

#define SET_QUEUED(buffer) ((buffer).flags |= V4L2_BUF_FLAG_QUEUED)

#define IS_QUEUED(buffer) \
    ((buffer).flags & (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE))

enum
{
    GROUP_RELEASED,
    LAST_SIGNAL
};

static guint gst_aml_v4l2_allocator_signals[LAST_SIGNAL] = {0};

static void gst_aml_v4l2_allocator_dump_es_buf(GstAmlV4l2Allocator *allocator, GstAmlV4l2MemoryGroup *group)
{
    const gchar *dump_dir = NULL;
    gchar *full_file_name = NULL;
    FILE *out = NULL;

    dump_dir = g_getenv("GST_AML_DUMP_AML_V4L2_ES_BUF_DIR");
    if (G_LIKELY(dump_dir == NULL))
        return;

    if (allocator->obj->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && allocator->obj->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
        return;

    GST_DEBUG_OBJECT(allocator, "checks passed, start dump");
    full_file_name = g_strdup_printf("%s" G_DIR_SEPARATOR_S "amlv4l2_es.bin", dump_dir);
    if ((out = fopen(full_file_name, "ab")))
    {
        GST_DEBUG_OBJECT(allocator, "dump file opened");
        GstMapInfo map;
        memset(&map, 0, sizeof(GstMapInfo));
        for (int i = 0; i < group->n_mem; i++)
        {
            if (gst_memory_map(group->mem[i], &map, GST_MAP_READ))
            {
                GST_DEBUG_OBJECT(allocator, "es ts:%llu dump_size:%" G_GSIZE_FORMAT ",v4l2_buf_bytesused:%d,%d",
                                 group->buffer.timestamp.tv_sec * 1000000000ULL + group->buffer.timestamp.tv_usec * 1000,
                                 map.size,
                                 group->buffer.bytesused, group->planes[0].bytesused);
                fwrite(map.data, map.size, 1, out);
                gst_memory_unmap(group->mem[i], &map);
            }
        }
        fclose(out);
        out = NULL;
    }
    g_free(full_file_name);
}
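
/* Hedged usage note: to capture the elementary stream queued on the decoder's
 * OUTPUT queue, point the environment variable checked above at a writable
 * directory before running the pipeline, e.g.
 *
 *   export GST_AML_DUMP_AML_V4L2_ES_BUF_DIR=/tmp
 *
 * gst_aml_v4l2_allocator_dump_es_buf() then appends every OUTPUT buffer to
 * <dir>/amlv4l2_es.bin; the /tmp path is only an example.
 */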
98
xuesong.jiangae1548e2022-05-06 16:38:46 +080099static void gst_aml_v4l2_allocator_release(GstAmlV4l2Allocator *allocator,
bo.xiao857b8682024-09-12 16:40:32 +0800100 GstAmlV4l2Memory *mem);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800101
102static const gchar *
bo.xiao857b8682024-09-12 16:40:32 +0800103memory_type_to_str (guint32 memory)
xuesong.jiangae1548e2022-05-06 16:38:46 +0800104{
bo.xiao857b8682024-09-12 16:40:32 +0800105 switch (memory)
106 {
xuesong.jiangae1548e2022-05-06 16:38:46 +0800107 case V4L2_MEMORY_MMAP:
bo.xiao857b8682024-09-12 16:40:32 +0800108 return "mmap";
xuesong.jiangae1548e2022-05-06 16:38:46 +0800109 case V4L2_MEMORY_USERPTR:
bo.xiao857b8682024-09-12 16:40:32 +0800110 return "userptr";
xuesong.jiangae1548e2022-05-06 16:38:46 +0800111 case V4L2_MEMORY_DMABUF:
bo.xiao857b8682024-09-12 16:40:32 +0800112 return "dmabuf";
xuesong.jiangae1548e2022-05-06 16:38:46 +0800113 default:
bo.xiao857b8682024-09-12 16:40:32 +0800114 return "unknown";
115 }
xuesong.jiangae1548e2022-05-06 16:38:46 +0800116}
117
118/*************************************/
119/* GstAmlV4lMemory implementation */
120/*************************************/
121
122static gpointer
123_v4l2mem_map(GstAmlV4l2Memory *mem, gsize maxsize, GstMapFlags flags)
124{
bo.xiao857b8682024-09-12 16:40:32 +0800125 gpointer data = NULL;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800126
bo.xiao857b8682024-09-12 16:40:32 +0800127 switch (mem->group->buffer.memory)
128 {
xuesong.jiangae1548e2022-05-06 16:38:46 +0800129 case V4L2_MEMORY_MMAP:
130 case V4L2_MEMORY_USERPTR:
bo.xiao857b8682024-09-12 16:40:32 +0800131 data = mem->data;
132 break;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800133 case V4L2_MEMORY_DMABUF:
bo.xiao857b8682024-09-12 16:40:32 +0800134 /* v4l2 dmabuf memory are not shared with downstream */
135 g_assert_not_reached ();
136 break;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800137 default:
bo.xiao857b8682024-09-12 16:40:32 +0800138 GST_WARNING ("Unknown memory type %i", mem->group->buffer.memory);
139 break;
140 }
141 return data;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800142}
143
144static gboolean
145_v4l2mem_unmap(GstAmlV4l2Memory *mem)
146{
bo.xiao857b8682024-09-12 16:40:32 +0800147 gboolean ret = FALSE;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800148
bo.xiao857b8682024-09-12 16:40:32 +0800149 switch (mem->group->buffer.memory)
150 {
xuesong.jiangae1548e2022-05-06 16:38:46 +0800151 case V4L2_MEMORY_MMAP:
152 case V4L2_MEMORY_USERPTR:
bo.xiao857b8682024-09-12 16:40:32 +0800153 ret = TRUE;
154 break;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800155 case V4L2_MEMORY_DMABUF:
bo.xiao857b8682024-09-12 16:40:32 +0800156 /* v4l2 dmabuf memory are not share with downstream */
157 g_assert_not_reached ();
158 break;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800159 default:
bo.xiao857b8682024-09-12 16:40:32 +0800160 GST_WARNING ("Unknown memory type %i", mem->group->buffer.memory);
161 break;
162 }
163 return ret;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800164}
165
166static gboolean
167_v4l2mem_dispose(GstAmlV4l2Memory *mem)
168{
bo.xiao857b8682024-09-12 16:40:32 +0800169 GstAmlV4l2Allocator *allocator = (GstAmlV4l2Allocator *)mem->mem.allocator;
170 GstAmlV4l2MemoryGroup *group = mem->group;
171 gboolean ret;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800172
bo.xiao857b8682024-09-12 16:40:32 +0800173 if (group->mem[mem->plane])
174 {
175 /* We may have a dmabuf, replace it with returned original memory */
176 group->mem[mem->plane] = gst_memory_ref ((GstMemory *) mem);
177 gst_aml_v4l2_allocator_release(allocator, mem);
178 ret = FALSE;
179 }
180 else
181 {
182 gst_object_ref (allocator);
183 ret = TRUE;
184 }
xuesong.jiangae1548e2022-05-06 16:38:46 +0800185
bo.xiao857b8682024-09-12 16:40:32 +0800186 return ret;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800187}
188
189static inline GstAmlV4l2Memory *
bo.xiao857b8682024-09-12 16:40:32 +0800190_v4l2mem_new (GstMemoryFlags flags, GstAllocator * allocator,
191 GstMemory * parent, gsize maxsize, gsize align, gsize offset, gsize size,
192 gint plane, gpointer data, int dmafd, GstAmlV4l2MemoryGroup *group)
xuesong.jiangae1548e2022-05-06 16:38:46 +0800193{
bo.xiao857b8682024-09-12 16:40:32 +0800194 GstAmlV4l2Memory *mem;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800195
bo.xiao857b8682024-09-12 16:40:32 +0800196 mem = g_slice_new0 (GstAmlV4l2Memory);
197 gst_memory_init (GST_MEMORY_CAST (mem),
198 flags, allocator, parent, maxsize, align, offset, size);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800199
bo.xiao857b8682024-09-12 16:40:32 +0800200 if (parent == NULL)
201 mem->mem.mini_object.dispose =
202 (GstMiniObjectDisposeFunction) _v4l2mem_dispose;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800203
bo.xiao857b8682024-09-12 16:40:32 +0800204 mem->plane = plane;
205 mem->data = data;
206 mem->dmafd = dmafd;
207 mem->group = group;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800208
bo.xiao857b8682024-09-12 16:40:32 +0800209 return mem;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800210}
211
212static GstAmlV4l2Memory *
213_v4l2mem_share(GstAmlV4l2Memory *mem, gssize offset, gsize size)
214{
bo.xiao857b8682024-09-12 16:40:32 +0800215 GstAmlV4l2Memory *sub;
216 GstMemory *parent;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800217
bo.xiao857b8682024-09-12 16:40:32 +0800218 /* find the real parent */
219 if ((parent = mem->mem.parent) == NULL)
220 parent = (GstMemory *) mem;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800221
bo.xiao857b8682024-09-12 16:40:32 +0800222 if (size == -1)
223 size = mem->mem.size - offset;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800224
bo.xiao857b8682024-09-12 16:40:32 +0800225 /* the shared memory is always readonly */
226 sub = _v4l2mem_new (GST_MINI_OBJECT_FLAGS (parent) |
227 GST_MINI_OBJECT_FLAG_LOCK_READONLY, mem->mem.allocator, parent,
228 mem->mem.maxsize, mem->mem.align, offset, size, mem->plane, mem->data,
229 -1, mem->group);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800230
bo.xiao857b8682024-09-12 16:40:32 +0800231 return sub;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800232}
233
234static gboolean
235_v4l2mem_is_span(GstAmlV4l2Memory *mem1, GstAmlV4l2Memory *mem2, gsize *offset)
236{
bo.xiao857b8682024-09-12 16:40:32 +0800237 if (offset)
238 *offset = mem1->mem.offset - mem1->mem.parent->offset;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800239
bo.xiao857b8682024-09-12 16:40:32 +0800240 /* and memory is contiguous */
241 return mem1->mem.offset + mem1->mem.size == mem2->mem.offset;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800242}
243
244gboolean
bo.xiao857b8682024-09-12 16:40:32 +0800245gst_aml_is_v4l2_memory(GstMemory *mem)
xuesong.jiangae1548e2022-05-06 16:38:46 +0800246{
bo.xiao857b8682024-09-12 16:40:32 +0800247 return gst_memory_is_type(mem, GST_AML_V4L2_MEMORY_TYPE);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800248}
249
250GQuark
251gst_aml_v4l2_memory_quark(void)
252{
bo.xiao857b8682024-09-12 16:40:32 +0800253 static GQuark quark = 0;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800254
bo.xiao857b8682024-09-12 16:40:32 +0800255 if (quark == 0)
256 quark = g_quark_from_string ("GstAmlV4l2Memory");
xuesong.jiangae1548e2022-05-06 16:38:46 +0800257
bo.xiao857b8682024-09-12 16:40:32 +0800258 return quark;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800259}

/*************************************/
/* GstAmlV4l2MemoryGroup implementation */
/*************************************/

static void
gst_aml_v4l2_memory_group_free(GstAmlV4l2MemoryGroup *group)
{
    gint i;

    for (i = 0; i < group->n_mem; i++)
    {
        GstMemory *mem = group->mem[i];
        group->mem[i] = NULL;
        if (mem)
            gst_memory_unref (mem);
    }

    g_slice_free(GstAmlV4l2MemoryGroup, group);
}

static GstAmlV4l2MemoryGroup *
gst_aml_v4l2_memory_group_new(GstAmlV4l2Allocator *allocator, guint32 index)
{
    GstAmlV4l2Object *obj = allocator->obj;
    guint32 memory = allocator->memory;
    struct v4l2_format *format = &obj->format;
    GstAmlV4l2MemoryGroup *group;
    gsize img_size, buf_size;

    group = g_slice_new0 (GstAmlV4l2MemoryGroup);

    group->buffer.type = format->type;
    group->buffer.index = index;
    group->buffer.memory = memory;

    if (V4L2_TYPE_IS_MULTIPLANAR(format->type))
    {
        group->n_mem = group->buffer.length = format->fmt.pix_mp.num_planes;
        group->buffer.m.planes = group->planes;
    }
    else
    {
        group->n_mem = 1;
    }

    if (obj->ioctl (obj->video_fd, VIDIOC_QUERYBUF, &group->buffer) < 0)
        goto querybuf_failed;

    if (group->buffer.index != index)
    {
        GST_ERROR_OBJECT (allocator, "Buffer index returned by VIDIOC_QUERYBUF "
            "didn't match; this indicates a bug in your driver or libv4l2");
        g_slice_free (GstAmlV4l2MemoryGroup, group);
        return NULL;
    }

    /* Check that the provided size matches the format we negotiated. Failing
     * here usually means a driver or libv4l bug. */
    if (V4L2_TYPE_IS_MULTIPLANAR(obj->type))
    {
        gint i;

        for (i = 0; i < group->n_mem; i++)
        {
            img_size = obj->format.fmt.pix_mp.plane_fmt[i].sizeimage;
            buf_size = group->planes[i].length;
            if (buf_size < img_size)
                goto buffer_too_short;
        }
    }
    else
    {
        img_size = obj->format.fmt.pix.sizeimage;
        buf_size = group->buffer.length;
        if (buf_size < img_size)
            goto buffer_too_short;
    }

    /* We save non-planar buffer information into the multi-planar plane array
     * to avoid duplicating the code later */
    if (!V4L2_TYPE_IS_MULTIPLANAR(format->type))
    {
        group->planes[0].bytesused = group->buffer.bytesused;
        group->planes[0].length = group->buffer.length;
        group->planes[0].data_offset = 0;
        g_assert (sizeof (group->planes[0].m) == sizeof (group->buffer.m));
        memcpy (&group->planes[0].m, &group->buffer.m, sizeof (group->buffer.m));
    }

    GST_LOG_OBJECT (allocator, "Got %s buffer", memory_type_to_str (memory));
    GST_LOG_OBJECT (allocator, " index: %u", group->buffer.index);
    GST_LOG_OBJECT (allocator, " type: %d", group->buffer.type);
    GST_LOG_OBJECT (allocator, " flags: %08x", group->buffer.flags);
    GST_LOG_OBJECT (allocator, " field: %d", group->buffer.field);
    GST_LOG_OBJECT (allocator, " memory: %d", group->buffer.memory);
    GST_LOG_OBJECT (allocator, " planes: %d", group->n_mem);

#ifndef GST_DISABLE_GST_DEBUG
    if (memory == V4L2_MEMORY_MMAP)
    {
        gint i;
        for (i = 0; i < group->n_mem; i++)
        {
            GST_LOG_OBJECT (allocator,
                " [%u] bytesused: %u, length: %u, offset: %u", i,
                group->planes[i].bytesused, group->planes[i].length,
                group->planes[i].data_offset);
            GST_LOG_OBJECT (allocator, " [%u] MMAP offset: %u", i,
                group->planes[i].m.mem_offset);
        }
    }
#endif

    return group;

querybuf_failed:
    {
        GST_ERROR ("error querying buffer %d: %s", index, g_strerror (errno));
        goto failed;
    }
buffer_too_short:
    {
        GST_ERROR ("buffer size %" G_GSIZE_FORMAT
            " is smaller than the negotiated size %" G_GSIZE_FORMAT
            ", this is usually the result of a bug in the v4l2 driver or libv4l.",
            buf_size, img_size);
        goto failed;
    }
failed:
    gst_aml_v4l2_memory_group_free(group);
    return NULL;
}

/*************************************/
/* GstV4lAllocator implementation */
/*************************************/

static void
gst_aml_v4l2_allocator_release(GstAmlV4l2Allocator *allocator, GstAmlV4l2Memory *mem)
{
    GstAmlV4l2MemoryGroup *group = mem->group;

    GST_LOG_OBJECT (allocator, "plane %i of buffer %u released",
        mem->plane, group->buffer.index);

    switch (allocator->memory)
    {
    case V4L2_MEMORY_DMABUF:
        close (mem->dmafd);
        mem->dmafd = -1;
        break;
    case V4L2_MEMORY_USERPTR:
        mem->data = NULL;
        break;
    default:
        break;
    }

    /* When all memory objects are back, put the group back in the free queue */
    if (g_atomic_int_dec_and_test(&group->mems_allocated))
    {
        GST_LOG_OBJECT (allocator, "buffer %u released", group->buffer.index);
        gst_atomic_queue_push (allocator->free_queue, group);
        g_signal_emit (allocator, gst_aml_v4l2_allocator_signals[GROUP_RELEASED], 0);
    }

    /* Keep last, the allocator may be freed after this call */
    g_object_unref (allocator);
}

static void
gst_aml_v4l2_allocator_free(GstAllocator *gallocator, GstMemory *gmem)
{
    GstAmlV4l2Allocator *allocator = (GstAmlV4l2Allocator *)gallocator;
    GstAmlV4l2Object *obj = allocator->obj;
    GstAmlV4l2Memory *mem = (GstAmlV4l2Memory *)gmem;
    GstAmlV4l2MemoryGroup *group = mem->group;

    /* Only free unparented memory */
    if (mem->mem.parent == NULL)
    {
        GST_LOG_OBJECT (allocator, "freeing plane %i of buffer %u",
            mem->plane, group->buffer.index);

        if (allocator->memory == V4L2_MEMORY_MMAP)
        {
            if (mem->data)
                obj->munmap (mem->data, group->planes[mem->plane].length);
        }

        /* This applies to both mmap with expbuf and imported dmabuf memory */
        if (mem->dmafd >= 0)
            close (mem->dmafd);
    }

    g_slice_free (GstAmlV4l2Memory, mem);
}

static void
gst_aml_v4l2_allocator_dispose(GObject *obj)
{
    GstAmlV4l2Allocator *allocator = (GstAmlV4l2Allocator *)obj;
    gint i;

    GST_LOG_OBJECT (obj, "called");

    for (i = 0; i < allocator->count; i++)
    {
        GstAmlV4l2MemoryGroup *group = allocator->groups[i];
        allocator->groups[i] = NULL;
        if (group)
            gst_aml_v4l2_memory_group_free(group);
    }

    G_OBJECT_CLASS (parent_class)->dispose (obj);
}

static void
gst_aml_v4l2_allocator_finalize(GObject *obj)
{
    GstAmlV4l2Allocator *allocator = (GstAmlV4l2Allocator *)obj;

    GST_LOG_OBJECT (obj, "called");

    gst_atomic_queue_unref (allocator->free_queue);
    gst_object_unref (allocator->obj->element);

    G_OBJECT_CLASS (parent_class)->finalize (obj);
}

static void
gst_aml_v4l2_allocator_class_init(GstAmlV4l2AllocatorClass *klass)
{
    GObjectClass *object_class;
    GstAllocatorClass *allocator_class;

    allocator_class = (GstAllocatorClass *) klass;
    object_class = (GObjectClass *) klass;

    allocator_class->alloc = NULL;
    allocator_class->free = gst_aml_v4l2_allocator_free;

    object_class->dispose = gst_aml_v4l2_allocator_dispose;
    object_class->finalize = gst_aml_v4l2_allocator_finalize;

    gst_aml_v4l2_allocator_signals[GROUP_RELEASED] = g_signal_new("group-released",
        G_TYPE_FROM_CLASS(object_class), G_SIGNAL_RUN_LAST, 0, NULL, NULL, NULL,
        G_TYPE_NONE, 0);

    GST_DEBUG_CATEGORY_INIT(amlv4l2allocator_debug, "amlv4l2allocator", 0,
        "V4L2 Allocator");
}

static void
gst_aml_v4l2_allocator_init(GstAmlV4l2Allocator *allocator)
{
    GstAllocator *alloc = GST_ALLOCATOR_CAST (allocator);

    alloc->mem_type = GST_AML_V4L2_MEMORY_TYPE;
    alloc->mem_map = (GstMemoryMapFunction) _v4l2mem_map;
    alloc->mem_unmap = (GstMemoryUnmapFunction) _v4l2mem_unmap;
    alloc->mem_share = (GstMemoryShareFunction) _v4l2mem_share;
    alloc->mem_is_span = (GstMemoryIsSpanFunction) _v4l2mem_is_span;
    /* Use the default, fallback copy function */

    allocator->free_queue = gst_atomic_queue_new (VIDEO_MAX_FRAME);

    GST_OBJECT_FLAG_SET (allocator, GST_ALLOCATOR_FLAG_CUSTOM_ALLOC);
}

#define GST_AML_V4L2_ALLOCATOR_PROBE(obj, type) \
    gst_aml_v4l2_allocator_probe((obj), V4L2_MEMORY_##type, \
        GST_V4L2_ALLOCATOR_FLAG_##type##_REQBUFS, \
        GST_V4L2_ALLOCATOR_FLAG_##type##_CREATE_BUFS)
static guint32
gst_aml_v4l2_allocator_probe(GstAmlV4l2Allocator *allocator, guint32 memory,
    guint32 breq_flag, guint32 bcreate_flag)
{
    GstAmlV4l2Object *obj = allocator->obj;
    struct v4l2_requestbuffers breq = { 0 };
    guint32 flags = 0;

    breq.type = obj->type;
    breq.count = 0;
    breq.memory = memory;

    if (obj->ioctl(obj->video_fd, VIDIOC_REQBUFS, &breq) == 0)
    {
        struct v4l2_create_buffers bcreate = { 0 };

        flags |= breq_flag;

        bcreate.memory = memory;
        bcreate.format = obj->format;

        if ((obj->ioctl (obj->video_fd, VIDIOC_CREATE_BUFS, &bcreate) == 0))
            flags |= bcreate_flag;
    }

    if (breq.capabilities & V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS)
    {
        flags |= GST_V4L2_ALLOCATOR_FLAG_SUPPORTS_ORPHANED_BUFS;
        GST_DEBUG_OBJECT(allocator, "v4l2 supports GST_V4L2_ALLOCATOR_FLAG_SUPPORTS_ORPHANED_BUFS");
    }

    return flags;
}
571static GstAmlV4l2MemoryGroup *
572gst_aml_v4l2_allocator_create_buf(GstAmlV4l2Allocator *allocator)
573{
bo.xiao857b8682024-09-12 16:40:32 +0800574 GstAmlV4l2Object *obj = allocator->obj;
575 struct v4l2_create_buffers bcreate = { 0 };
576 GstAmlV4l2MemoryGroup *group = NULL;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800577
bo.xiao857b8682024-09-12 16:40:32 +0800578 GST_OBJECT_LOCK (allocator);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800579
bo.xiao857b8682024-09-12 16:40:32 +0800580 if (!g_atomic_int_get (&allocator->active))
581 goto done;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800582
bo.xiao857b8682024-09-12 16:40:32 +0800583 if (GST_AML_V4L2_ALLOCATOR_IS_ORPHANED(allocator))
584 goto orphaned_bug;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800585
bo.xiao857b8682024-09-12 16:40:32 +0800586 bcreate.memory = allocator->memory;
587 bcreate.format = obj->format;
588 bcreate.count = 1;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800589
bo.xiao857b8682024-09-12 16:40:32 +0800590 if (!allocator->can_allocate)
591 goto done;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800592
bo.xiao857b8682024-09-12 16:40:32 +0800593 if (obj->ioctl (obj->video_fd, VIDIOC_CREATE_BUFS, &bcreate) < 0)
594 goto create_bufs_failed;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800595
bo.xiao857b8682024-09-12 16:40:32 +0800596 if (allocator->groups[bcreate.index] != NULL)
597 goto create_bufs_bug;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800598
bo.xiao857b8682024-09-12 16:40:32 +0800599 group = gst_aml_v4l2_memory_group_new(allocator, bcreate.index);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800600
bo.xiao857b8682024-09-12 16:40:32 +0800601 if (group)
602 {
603 allocator->groups[bcreate.index] = group;
604 allocator->count++;
605 }
xuesong.jiangae1548e2022-05-06 16:38:46 +0800606
607done:
bo.xiao857b8682024-09-12 16:40:32 +0800608 GST_OBJECT_UNLOCK (allocator);
609 return group;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800610
611orphaned_bug:
bo.xiao857b8682024-09-12 16:40:32 +0800612 {
613 GST_ERROR_OBJECT (allocator, "allocator was orphaned, "
614 "not creating new buffers");
xuesong.jiangae1548e2022-05-06 16:38:46 +0800615 goto done;
bo.xiao857b8682024-09-12 16:40:32 +0800616 }
xuesong.jiangae1548e2022-05-06 16:38:46 +0800617create_bufs_failed:
bo.xiao857b8682024-09-12 16:40:32 +0800618 {
619 GST_WARNING_OBJECT (allocator, "error creating a new buffer: %s",
620 g_strerror (errno));
xuesong.jiangae1548e2022-05-06 16:38:46 +0800621 goto done;
bo.xiao857b8682024-09-12 16:40:32 +0800622 }
xuesong.jiangae1548e2022-05-06 16:38:46 +0800623create_bufs_bug:
bo.xiao857b8682024-09-12 16:40:32 +0800624 {
625 GST_ERROR_OBJECT (allocator, "created buffer has already used buffer "
626 "index %i, this means there is an bug in your driver or libv4l2",
627 bcreate.index);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800628 goto done;
bo.xiao857b8682024-09-12 16:40:32 +0800629 }
xuesong.jiangae1548e2022-05-06 16:38:46 +0800630}
631
632static GstAmlV4l2MemoryGroup *
633gst_aml_v4l2_allocator_alloc(GstAmlV4l2Allocator *allocator)
634{
bo.xiao857b8682024-09-12 16:40:32 +0800635 GstAmlV4l2MemoryGroup *group;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800636
bo.xiao857b8682024-09-12 16:40:32 +0800637 if (!g_atomic_int_get (&allocator->active))
638 return NULL;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800639
bo.xiao857b8682024-09-12 16:40:32 +0800640 group = gst_atomic_queue_pop (allocator->free_queue);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800641
bo.xiao857b8682024-09-12 16:40:32 +0800642 if (group == NULL)
643 {
644 if (allocator->can_allocate)
xuesong.jiangae1548e2022-05-06 16:38:46 +0800645 {
bo.xiao857b8682024-09-12 16:40:32 +0800646 group = gst_aml_v4l2_allocator_create_buf(allocator);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800647
bo.xiao857b8682024-09-12 16:40:32 +0800648 /* Don't hammer on CREATE_BUFS */
649 if (group == NULL)
650 allocator->can_allocate = FALSE;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800651 }
bo.xiao857b8682024-09-12 16:40:32 +0800652 }
xuesong.jiangae1548e2022-05-06 16:38:46 +0800653
bo.xiao857b8682024-09-12 16:40:32 +0800654 return group;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800655}
656
657static void
658gst_aml_v4l2_allocator_reset_size(GstAmlV4l2Allocator *allocator,
659 GstAmlV4l2MemoryGroup *group)
660{
bo.xiao857b8682024-09-12 16:40:32 +0800661 gint i;
662 for (i = 0; i < group->n_mem; i++)
663 {
664 group->mem[i]->maxsize = group->planes[i].length;
665 group->mem[i]->offset = 0;
666 group->mem[i]->size = group->planes[i].length;
667 }
xuesong.jiangae1548e2022-05-06 16:38:46 +0800668}
669
670static void
671_cleanup_failed_alloc(GstAmlV4l2Allocator *allocator, GstAmlV4l2MemoryGroup *group)
672{
bo.xiao857b8682024-09-12 16:40:32 +0800673 if (group->mems_allocated > 0)
674 {
675 gint i;
676 /* If one or more mmap worked, we need to unref the memory, otherwise
677 * they will keep a ref on the allocator and leak it. This will put back
678 * the group into the free_queue */
679 for (i = 0; i < group->n_mem; i++)
680 gst_memory_unref (group->mem[i]);
681 }
682 else
683 {
684 /* Otherwise, group has to be on free queue for _stop() to work */
685 gst_atomic_queue_push (allocator->free_queue, group);
686 }
xuesong.jiangae1548e2022-05-06 16:38:46 +0800687}
688
689GstAmlV4l2Allocator *
690gst_aml_v4l2_allocator_new(GstObject *parent, GstAmlV4l2Object *v4l2object)
691{
bo.xiao857b8682024-09-12 16:40:32 +0800692 GstAmlV4l2Allocator *allocator;
693 guint32 flags = 0;
694 gchar *name, *parent_name;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800695
bo.xiao857b8682024-09-12 16:40:32 +0800696 parent_name = gst_object_get_name (parent);
697 name = g_strconcat (parent_name, ":allocator", NULL);
698 g_free (parent_name);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800699
bo.xiao857b8682024-09-12 16:40:32 +0800700 allocator = g_object_new(GST_TYPE_AML_V4L2_ALLOCATOR, "name", name, NULL);
701 gst_object_ref_sink (allocator);
702 g_free (name);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800703
bo.xiao857b8682024-09-12 16:40:32 +0800704 /* Save everything */
705 allocator->obj = v4l2object;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800706
bo.xiao857b8682024-09-12 16:40:32 +0800707 /* Keep a ref on the element so obj does not disappear */
708 gst_object_ref (allocator->obj->element);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800709
bo.xiao857b8682024-09-12 16:40:32 +0800710 flags |= GST_AML_V4L2_ALLOCATOR_PROBE(allocator, MMAP);
711 flags |= GST_AML_V4L2_ALLOCATOR_PROBE(allocator, USERPTR);
712 flags |= GST_AML_V4L2_ALLOCATOR_PROBE(allocator, DMABUF);
xuesong.jiangae1548e2022-05-06 16:38:46 +0800713
xuesong.jiangae1548e2022-05-06 16:38:46 +0800714
bo.xiao857b8682024-09-12 16:40:32 +0800715 if (flags == 0)
716 {
717 /* Drivers not ported from videobuf to videbuf2 don't allow freeing buffers
718 * using REQBUFS(0). This is a workaround to still support these drivers,
719 * which are known to have MMAP support. */
720 GST_WARNING_OBJECT (allocator, "Could not probe supported memory type, "
721 "assuming MMAP is supported, this is expected for older drivers not "
722 " yet ported to videobuf2 framework");
723 flags = GST_V4L2_ALLOCATOR_FLAG_MMAP_REQBUFS;
724 }
xuesong.jiangae1548e2022-05-06 16:38:46 +0800725
bo.xiao857b8682024-09-12 16:40:32 +0800726 GST_OBJECT_FLAG_SET (allocator, flags);
727
728 return allocator;
xuesong.jiangae1548e2022-05-06 16:38:46 +0800729}
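
/* Illustrative sketch (not part of the build): the typical lifecycle of this
 * allocator as driven by the element's buffer pool. "parent" and "v4l2object"
 * stand for the owning element and its configured GstAmlV4l2Object; the
 * buffer count of 4 is arbitrary.
 *
 *   GstAmlV4l2Allocator *alloc = gst_aml_v4l2_allocator_new (parent, v4l2object);
 *   guint n = gst_aml_v4l2_allocator_start (alloc, 4, V4L2_MEMORY_MMAP);
 *   // ... allocate, queue and dequeue the n buffers ...
 *   gst_aml_v4l2_allocator_stop (alloc);
 *   gst_object_unref (alloc);
 */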

guint
gst_aml_v4l2_allocator_start(GstAmlV4l2Allocator *allocator, guint32 count,
    guint32 memory)
{
    GstAmlV4l2Object *obj = allocator->obj;
    struct v4l2_requestbuffers breq = { count, obj->type, memory };
    gboolean can_allocate;
    gint i;

    g_return_val_if_fail (count != 0, 0);

    GST_OBJECT_LOCK (allocator);

    if (g_atomic_int_get (&allocator->active))
        goto already_active;

    if (GST_AML_V4L2_ALLOCATOR_IS_ORPHANED(allocator))
        goto orphaned;

    if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) < 0)
        goto reqbufs_failed;

    if (breq.count < 1)
        goto out_of_memory;

    switch (memory)
    {
    case V4L2_MEMORY_MMAP:
        can_allocate = GST_AML_V4L2_ALLOCATOR_CAN_ALLOCATE(allocator, MMAP);
        break;
    case V4L2_MEMORY_USERPTR:
        can_allocate = GST_AML_V4L2_ALLOCATOR_CAN_ALLOCATE(allocator, USERPTR);
        break;
    case V4L2_MEMORY_DMABUF:
        can_allocate = GST_AML_V4L2_ALLOCATOR_CAN_ALLOCATE(allocator, DMABUF);
        break;
    default:
        can_allocate = FALSE;
        break;
    }

    GST_DEBUG_OBJECT (allocator, "allocated %u %s buffers out of %u requested",
        breq.count, memory_type_to_str (memory), count);

    allocator->can_allocate = can_allocate;
    allocator->count = breq.count;
    allocator->memory = memory;

    /* Create memory groups */
    for (i = 0; i < allocator->count; i++)
    {
        allocator->groups[i] = gst_aml_v4l2_memory_group_new(allocator, i);
        if (allocator->groups[i] == NULL)
            goto error;

        gst_atomic_queue_push (allocator->free_queue, allocator->groups[i]);
    }

    g_atomic_int_set (&allocator->active, TRUE);

done:
    GST_OBJECT_UNLOCK (allocator);
    return breq.count;

already_active:
    {
        GST_ERROR_OBJECT (allocator, "allocator already active");
        goto error;
    }
orphaned:
    {
        GST_ERROR_OBJECT (allocator, "allocator was orphaned");
        goto error;
    }
reqbufs_failed:
    {
        GST_ERROR_OBJECT (allocator,
            "error requesting %d buffers: %s", count, g_strerror (errno));
        goto error;
    }
out_of_memory:
    {
        GST_ERROR_OBJECT (allocator, "Not enough memory to allocate buffers");
        goto error;
    }
error:
    {
        breq.count = 0;
        goto done;
    }
}

GstAmlV4l2Return
gst_aml_v4l2_allocator_stop(GstAmlV4l2Allocator *allocator)
{
    GstAmlV4l2Object *obj = allocator->obj;
    struct v4l2_requestbuffers breq = { 0, obj->type, allocator->memory };
    gint i = 0;
    GstAmlV4l2Return ret = GST_AML_V4L2_OK;

    GST_DEBUG_OBJECT (allocator, "stop allocator");

    GST_OBJECT_LOCK (allocator);

    if (!g_atomic_int_get (&allocator->active))
        goto done;

    if (gst_atomic_queue_length(allocator->free_queue) != allocator->count)
    {
        GST_DEBUG_OBJECT (allocator, "allocator is still in use");
        ret = GST_AML_V4L2_BUSY;
        goto done;
    }

    while (gst_atomic_queue_pop(allocator->free_queue))
    {
        /* nothing */
    };

    for (i = 0; i < allocator->count; i++)
    {
        GstAmlV4l2MemoryGroup *group = allocator->groups[i];
        allocator->groups[i] = NULL;
        if (group)
            gst_aml_v4l2_memory_group_free(group);
    }

    if (!GST_AML_V4L2_ALLOCATOR_IS_ORPHANED(allocator))
    {
        /* Not all drivers support reqbufs(0), so warn only */
        if (obj->ioctl (obj->video_fd, VIDIOC_REQBUFS, &breq) < 0)
            GST_WARNING_OBJECT (allocator,
                "error releasing buffers: %s", g_strerror (errno));
    }

    allocator->count = 0;

    g_atomic_int_set (&allocator->active, FALSE);

done:
    GST_OBJECT_UNLOCK (allocator);
    return ret;
}

gboolean
gst_aml_v4l2_allocator_orphan(GstAmlV4l2Allocator *allocator)
{
    GstAmlV4l2Object *obj = allocator->obj;
    struct v4l2_requestbuffers breq = { 0, obj->type, allocator->memory };

    if (!GST_AML_V4L2_ALLOCATOR_CAN_ORPHAN_BUFS(allocator))
        return FALSE;

    GST_OBJECT_FLAG_SET (allocator, GST_V4L2_ALLOCATOR_FLAG_ORPHANED);

    if (obj->ioctl(obj->video_fd, VIDIOC_REQBUFS, &breq) < 0)
    {
        GST_ERROR_OBJECT (allocator,
            "error orphaning buffers: %s", g_strerror (errno));
        return FALSE;
    }

    return TRUE;
}

GstAmlV4l2MemoryGroup *
gst_aml_v4l2_allocator_alloc_mmap(GstAmlV4l2Allocator *allocator)
{
    GstAmlV4l2Object *obj = allocator->obj;
    GstAmlV4l2MemoryGroup *group;
    gint i;

    g_return_val_if_fail (allocator->memory == V4L2_MEMORY_MMAP, NULL);

    group = gst_aml_v4l2_allocator_alloc(allocator);

    if (group == NULL)
        return NULL;

    for (i = 0; i < group->n_mem; i++)
    {
        if (group->mem[i] == NULL)
        {
            gpointer data;
            data = obj->mmap (NULL, group->planes[i].length, PROT_READ | PROT_WRITE,
                MAP_SHARED, obj->video_fd, group->planes[i].m.mem_offset);

            if (data == MAP_FAILED)
                goto mmap_failed;

            GST_LOG_OBJECT (allocator,
                "mmap buffer length %d, data offset %d, plane %d",
                group->planes[i].length, group->planes[i].data_offset, i);

            group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
                NULL, group->planes[i].length, 0, 0, group->planes[i].length, i, data,
                -1, group);
        }
        else
        {
            /* Take back the allocator reference */
            gst_object_ref (allocator);
        }

        group->mems_allocated++;
    }

    /* Ensure group size. Unlike GST, v4l2 has the size (bytesused) initially set
     * to 0. As the length might be bigger than the expected size exposed in the
     * format, we simply set bytesused initially and reset it here for
     * simplicity */
    gst_aml_v4l2_allocator_reset_size(allocator, group);

    return group;

mmap_failed:
    {
        GST_ERROR_OBJECT (allocator, "Failed to mmap buffer: %s",
            g_strerror (errno));
        _cleanup_failed_alloc (allocator, group);
        return NULL;
    }
}
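
/* Illustrative sketch (not part of the build): a buffer pool built on top of
 * this allocator would typically wrap the group's plane memories into a
 * GstBuffer after an MMAP allocation. The snippet is hypothetical; a real pool
 * also keeps its own references on the memories it hands out.
 *
 *   GstAmlV4l2MemoryGroup *grp = gst_aml_v4l2_allocator_alloc_mmap (allocator);
 *   if (grp) {
 *       GstBuffer *buf = gst_buffer_new ();
 *       for (gint p = 0; p < grp->n_mem; p++)
 *           gst_buffer_append_memory (buf, grp->mem[p]);
 *   }
 */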

GstAmlV4l2MemoryGroup *
gst_aml_v4l2_allocator_alloc_dmabuf(GstAmlV4l2Allocator *allocator,
    GstAllocator * dmabuf_allocator)
{
    GstAmlV4l2Object *obj = allocator->obj;
    GstAmlV4l2MemoryGroup *group;
    gint i;

    g_return_val_if_fail (allocator->memory == V4L2_MEMORY_MMAP, NULL);

    group = gst_aml_v4l2_allocator_alloc(allocator);

    if (group == NULL)
        return NULL;

    for (i = 0; i < group->n_mem; i++)
    {
        GstAmlV4l2Memory *mem;
        GstMemory *dma_mem;

        if (group->mem[i] == NULL)
        {
            struct v4l2_exportbuffer expbuf = { 0 };

            expbuf.type = obj->type;
            expbuf.index = group->buffer.index;
            expbuf.plane = i;
            expbuf.flags = O_CLOEXEC | O_RDWR;

            if (obj->ioctl (obj->video_fd, VIDIOC_EXPBUF, &expbuf) < 0)
                goto expbuf_failed;

            GST_LOG_OBJECT (allocator, "exported DMABUF as fd %i plane %d",
                expbuf.fd, i);

            group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
                NULL, group->planes[i].length, 0, group->planes[i].data_offset,
                group->planes[i].length - group->planes[i].data_offset, i, NULL,
                expbuf.fd, group);
        }
        else
        {
            /* Take back the allocator reference */
            gst_object_ref (allocator);
        }

        group->mems_allocated++;

        g_assert(gst_aml_is_v4l2_memory(group->mem[i]));
        mem = (GstAmlV4l2Memory *)group->mem[i];

        dma_mem = gst_fd_allocator_alloc (dmabuf_allocator, mem->dmafd,
            group->planes[i].length, GST_FD_MEMORY_FLAG_DONT_CLOSE);
        gst_memory_resize (dma_mem, group->planes[i].data_offset,
            group->planes[i].length - group->planes[i].data_offset);

        gst_mini_object_set_qdata (GST_MINI_OBJECT (dma_mem),
            GST_AML_V4L2_MEMORY_QUARK, mem, (GDestroyNotify)gst_memory_unref);

        group->mem[i] = dma_mem;
    }

    gst_aml_v4l2_allocator_reset_size(allocator, group);

    return group;

expbuf_failed:
    {
        GST_ERROR_OBJECT (allocator, "Failed to export DMABUF: %s",
            g_strerror (errno));
        goto cleanup;
    }
cleanup:
    {
        _cleanup_failed_alloc (allocator, group);
        return NULL;
    }
}
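
/* Illustrative sketch (not part of the build): the dmabuf_allocator argument
 * above is typically a GstDmaBufAllocator created once by the caller and
 * reused for every group; "allocator" is this file's GstAmlV4l2Allocator.
 *
 *   GstAllocator *dma_alloc = gst_dmabuf_allocator_new ();
 *   GstAmlV4l2MemoryGroup *grp =
 *       gst_aml_v4l2_allocator_alloc_dmabuf (allocator, dma_alloc);
 */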

static void
gst_aml_v4l2_allocator_clear_dmabufin(GstAmlV4l2Allocator *allocator,
    GstAmlV4l2MemoryGroup *group)
{
    GstAmlV4l2Object *obj = allocator->obj;
    GstAmlV4l2Memory *mem;
    gint i;

    g_return_if_fail (allocator->memory == V4L2_MEMORY_DMABUF);

    for (i = 0; i < group->n_mem; i++)
    {
        mem = (GstAmlV4l2Memory *)group->mem[i];

        GST_LOG_OBJECT (allocator, "[%i] clearing DMABUF import, fd %i plane %d",
            group->buffer.index, mem->dmafd, i);

        /* Update memory */
        mem->mem.maxsize = 0;
        mem->mem.offset = 0;
        mem->mem.size = 0;
        mem->dmafd = -1;

        /* Update v4l2 structure */
        group->planes[i].length = 0;
        group->planes[i].bytesused = 0;
        group->planes[i].m.fd = -1;
        group->planes[i].data_offset = 0;
    }

    if (!V4L2_TYPE_IS_MULTIPLANAR(obj->type))
    {
        group->buffer.bytesused = 0;
        group->buffer.length = 0;
        group->buffer.m.fd = -1;
    }
}

GstAmlV4l2MemoryGroup *
gst_aml_v4l2_allocator_alloc_dmabufin(GstAmlV4l2Allocator *allocator)
{
    GstAmlV4l2MemoryGroup *group;
    gint i;

    g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, NULL);

    group = gst_aml_v4l2_allocator_alloc(allocator);

    if (group == NULL)
        return NULL;

    GST_LOG_OBJECT (allocator, "allocating empty DMABUF import group");

    for (i = 0; i < group->n_mem; i++)
    {
        if (group->mem[i] == NULL)
        {
            group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
                NULL, 0, 0, 0, 0, i, NULL, -1, group);
        }
        else
        {
            /* Take back the allocator reference */
            gst_object_ref (allocator);
        }

        group->mems_allocated++;
    }

    gst_aml_v4l2_allocator_clear_dmabufin(allocator, group);

    return group;
}

static void
gst_aml_v4l2_allocator_clear_userptr(GstAmlV4l2Allocator *allocator,
    GstAmlV4l2MemoryGroup *group)
{
    GstAmlV4l2Object *obj = allocator->obj;
    GstAmlV4l2Memory *mem;
    gint i;

    g_return_if_fail (allocator->memory == V4L2_MEMORY_USERPTR);

    for (i = 0; i < group->n_mem; i++)
    {
        mem = (GstAmlV4l2Memory *)group->mem[i];

        GST_LOG_OBJECT (allocator, "[%i] clearing USERPTR %p plane %d size %"
            G_GSIZE_FORMAT, group->buffer.index, mem->data, i, mem->mem.size);

        mem->mem.maxsize = 0;
        mem->mem.size = 0;
        mem->data = NULL;

        group->planes[i].length = 0;
        group->planes[i].bytesused = 0;
        group->planes[i].m.userptr = 0;
    }

    if (!V4L2_TYPE_IS_MULTIPLANAR(obj->type))
    {
        group->buffer.bytesused = 0;
        group->buffer.length = 0;
        group->buffer.m.userptr = 0;
    }
}

GstAmlV4l2MemoryGroup *
gst_aml_v4l2_allocator_alloc_userptr(GstAmlV4l2Allocator *allocator)
{
    GstAmlV4l2MemoryGroup *group;
    gint i;

    g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, NULL);

    group = gst_aml_v4l2_allocator_alloc (allocator);

    if (group == NULL)
        return NULL;

    GST_LOG_OBJECT (allocator, "allocating empty USERPTR group");

    for (i = 0; i < group->n_mem; i++)
    {
        if (group->mem[i] == NULL)
        {
            group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
                NULL, 0, 0, 0, 0, i, NULL, -1, group);
        }
        else
        {
            /* Take back the allocator reference */
            gst_object_ref (allocator);
        }

        group->mems_allocated++;
    }

    gst_aml_v4l2_allocator_clear_userptr(allocator, group);

    return group;
}

gboolean
gst_aml_v4l2_allocator_import_dmabuf(GstAmlV4l2Allocator *allocator,
    GstAmlV4l2MemoryGroup *group, gint n_mem, GstMemory **dma_mem)
{
    GstAmlV4l2Object *obj = allocator->obj;
    GstAmlV4l2Memory *mem;
    gint i;

    g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, FALSE);

    if (group->n_mem != n_mem)
        goto n_mem_missmatch;

    for (i = 0; i < group->n_mem; i++)
    {
        gint dmafd;
        gsize size, offset, maxsize;

        if (!gst_is_dmabuf_memory (dma_mem[i]))
            goto not_dmabuf;

        size = gst_memory_get_sizes (dma_mem[i], &offset, &maxsize);

        dmafd = gst_dmabuf_memory_get_fd (dma_mem[i]);

        GST_LOG_OBJECT (allocator, "[%i] imported DMABUF as fd %i plane %d",
            group->buffer.index, dmafd, i);

        mem = (GstAmlV4l2Memory *) group->mem[i];

        /* Update memory */
        mem->mem.maxsize = maxsize;
        mem->mem.offset = offset;
        mem->mem.size = size;
        mem->dmafd = dmafd;

        /* Update v4l2 structure */
        group->planes[i].length = maxsize;
        group->planes[i].bytesused = size + offset;
        group->planes[i].m.fd = dmafd;
        group->planes[i].data_offset = offset;
    }

    /* Copy into the buffer structure if not using planes */
    if (!V4L2_TYPE_IS_MULTIPLANAR(obj->type))
    {
        group->buffer.bytesused = group->planes[0].bytesused;
        group->buffer.length = group->planes[0].length;
        group->buffer.m.fd = group->planes[0].m.fd;

        /* FIXME Check if data_offset > 0 and fail for non-multi-planar */
        g_assert (group->planes[0].data_offset == 0);
    }
    else
    {
        group->buffer.length = group->n_mem;
    }

    return TRUE;

n_mem_missmatch:
    {
        GST_ERROR_OBJECT (allocator, "Got %i dmabuf but needed %i", n_mem,
            group->n_mem);
        return FALSE;
    }
not_dmabuf:
    {
        GST_ERROR_OBJECT (allocator, "Memory %i is not a DMABUF", i);
        return FALSE;
    }
}
1252
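/* Import user-space pointers into @group for V4L2_MEMORY_USERPTR: with a
 * multi-planar format each plane gets its own pointer and size, otherwise the
 * single plane covers the whole image of @img_size bytes. */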
gboolean
gst_aml_v4l2_allocator_import_userptr(GstAmlV4l2Allocator *allocator,
                                      GstAmlV4l2MemoryGroup *group, gsize img_size,
                                      int n_planes, gpointer *data, gsize *size)
{
    GstAmlV4l2Object *obj = allocator->obj;
    GstAmlV4l2Memory *mem;
    gint i;

    g_return_val_if_fail(allocator->memory == V4L2_MEMORY_USERPTR, FALSE);

    /* TODO Support passing N planes from 1 memory to MPLANE v4l2 format */
    if (V4L2_TYPE_IS_MULTIPLANAR(obj->type) && n_planes != group->n_mem)
        goto n_mem_mismatch;

    for (i = 0; i < group->n_mem; i++)
    {
        gsize maxsize, psize;

        /* TODO request used size and maxsize separately */
        if (V4L2_TYPE_IS_MULTIPLANAR(obj->type))
            maxsize = psize = size[i];
        else
            maxsize = psize = img_size;

        g_assert(psize <= img_size);

        GST_LOG_OBJECT(allocator, "[%i] imported USERPTR %p plane %d size %" G_GSIZE_FORMAT,
                       group->buffer.index, data[i], i, psize);

        mem = (GstAmlV4l2Memory *)group->mem[i];

        mem->mem.maxsize = maxsize;
        mem->mem.size = psize;
        mem->data = data[i];

        group->planes[i].length = maxsize;
        group->planes[i].bytesused = psize;
        group->planes[i].m.userptr = (unsigned long)data[i];
        group->planes[i].data_offset = 0;
    }

    /* Copy into buffer structure if not using planes */
    if (!V4L2_TYPE_IS_MULTIPLANAR(obj->type))
    {
        group->buffer.bytesused = group->planes[0].bytesused;
        group->buffer.length = group->planes[0].length;
        group->buffer.m.userptr = group->planes[0].m.userptr;
    }
    else
    {
        group->buffer.length = group->n_mem;
    }

    return TRUE;

n_mem_mismatch:
    {
        GST_ERROR_OBJECT(allocator, "Got %i userptr planes while the driver needs %i",
                         n_planes, group->n_mem);
        return FALSE;
    }
}

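/* Release every buffer still marked as queued: clear its QUEUED flag, reset
 * the group, and drop the references that were taken when it was queued. */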
void gst_aml_v4l2_allocator_flush(GstAmlV4l2Allocator *allocator)
{
    gint i;

    GST_OBJECT_LOCK(allocator);

    if (!g_atomic_int_get(&allocator->active))
        goto done;

    for (i = 0; i < allocator->count; i++)
    {
        GstAmlV4l2MemoryGroup *group = allocator->groups[i];
        gint n;

        if (IS_QUEUED(group->buffer))
        {
            UNSET_QUEUED(group->buffer);

            gst_aml_v4l2_allocator_reset_group(allocator, group);

            for (n = 0; n < group->n_mem; n++)
                gst_memory_unref(group->mem[n]);
        }
    }

done:
    GST_OBJECT_UNLOCK(allocator);
}

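/* Queue @group on the device with VIDIOC_QBUF. Plane sizes are refreshed from
 * the GstMemory objects, an extra reference is held on each memory while the
 * driver owns the buffer, and the QUEUED flag is kept consistent even when the
 * driver misreports it. */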
gboolean
gst_aml_v4l2_allocator_qbuf(GstAmlV4l2Allocator *allocator,
                            GstAmlV4l2MemoryGroup *group)
{
    GstAmlV4l2Object *obj = allocator->obj;
    gboolean ret = TRUE;
    gint i;

    g_return_val_if_fail(g_atomic_int_get(&allocator->active), FALSE);

    /* update sizes */
    if (V4L2_TYPE_IS_MULTIPLANAR(obj->type))
    {
        for (i = 0; i < group->n_mem; i++)
            group->planes[i].bytesused =
                gst_memory_get_sizes(group->mem[i], NULL, NULL);
    }
    else
    {
        group->buffer.bytesused = gst_memory_get_sizes(group->mem[0], NULL, NULL);
    }

    /* Ensure the memory will stay around and is RO */
    for (i = 0; i < group->n_mem; i++)
        gst_memory_ref(group->mem[i]);

    gint64 currFramePTS = 0;
    if (group->buffer.timestamp.tv_sec != -1)
    {
        currFramePTS = group->buffer.timestamp.tv_sec * 1000000LL + group->buffer.timestamp.tv_usec;
    }
    GST_LOG_OBJECT(allocator, "queueing buffer, timestamp:%" G_GINT64_FORMAT "(us), tv_sec:%ld, tv_usec:%ld",
                   currFramePTS, group->buffer.timestamp.tv_sec, group->buffer.timestamp.tv_usec);

    gst_aml_v4l2_allocator_dump_es_buf(allocator, group);

    if (obj->ioctl(obj->video_fd, VIDIOC_QBUF, &group->buffer) < 0)
    {
        GST_ERROR_OBJECT(allocator, "failed queueing buffer %i: %s",
                         group->buffer.index, g_strerror(errno));

        /* Release the memory, possibly making it RW again */
        for (i = 0; i < group->n_mem; i++)
            gst_memory_unref(group->mem[i]);

        ret = FALSE;
        if (IS_QUEUED(group->buffer))
        {
            GST_DEBUG_OBJECT(allocator,
                             "driver pretends buffer is queued even if queue failed");
            UNSET_QUEUED(group->buffer);
        }
        goto done;
    }

    GST_LOG_OBJECT(allocator, "queued buffer %i (flags 0x%X)",
                   group->buffer.index, group->buffer.flags);

    if (!IS_QUEUED(group->buffer))
    {
        GST_DEBUG_OBJECT(allocator,
                         "driver pretends buffer is not queued even if queue succeeded");
        SET_QUEUED(group->buffer);
    }

done:
    return ret;
}

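/* Dequeue one buffer with VIDIOC_DQBUF and return its memory group. On the
 * capture side the memories are resized to the bytes the driver actually
 * produced; the references taken at QBUF time are released. EPIPE maps to
 * GST_FLOW_EOS (last buffer), any other error to GST_FLOW_ERROR. */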
GstFlowReturn
gst_aml_v4l2_allocator_dqbuf(GstAmlV4l2Allocator *allocator,
                             GstAmlV4l2MemoryGroup **group_out)
{
    GstAmlV4l2Object *obj = allocator->obj;
    struct v4l2_buffer buffer = {0};
    struct v4l2_plane planes[VIDEO_MAX_PLANES] = {{0}};
    gint i;

    GstAmlV4l2MemoryGroup *group = NULL;

    g_return_val_if_fail(g_atomic_int_get(&allocator->active), GST_FLOW_ERROR);

    buffer.type = obj->type;
    buffer.memory = allocator->memory;

    if (V4L2_TYPE_IS_MULTIPLANAR(obj->type))
    {
        buffer.length = obj->format.fmt.pix_mp.num_planes;
        buffer.m.planes = planes;
    }

    if (obj->ioctl(obj->video_fd, VIDIOC_DQBUF, &buffer) < 0)
        goto error;

    group = allocator->groups[buffer.index];

    if (!IS_QUEUED(group->buffer))
    {
        GST_ERROR_OBJECT(allocator,
                         "buffer %i was not queued, this indicates a driver bug.", buffer.index);
        return GST_FLOW_ERROR;
    }

    group->buffer = buffer;

    GST_LOG_OBJECT(allocator, "dequeued buffer %i (flags 0x%X)", buffer.index,
                   buffer.flags);

    if (IS_QUEUED(group->buffer))
    {
        GST_DEBUG_OBJECT(allocator,
                         "driver pretends buffer is queued even if dequeue succeeded");
        UNSET_QUEUED(group->buffer);
    }

    if (V4L2_TYPE_IS_MULTIPLANAR(obj->type))
    {
        group->buffer.m.planes = group->planes;
        memcpy(group->planes, buffer.m.planes, sizeof(planes));
    }
    else
    {
        group->planes[0].bytesused = group->buffer.bytesused;
        group->planes[0].length = group->buffer.length;
        g_assert(sizeof(group->planes[0].m) == sizeof(group->buffer.m));
        memcpy(&group->planes[0].m, &group->buffer.m, sizeof(group->buffer.m));
    }

    /* And update memory size */
    if (V4L2_TYPE_IS_OUTPUT(obj->type))
    {
        gst_aml_v4l2_allocator_reset_size(allocator, group);
    }
    else
    {
        /* for capture, simply read the size */
        for (i = 0; i < group->n_mem; i++)
        {
            gsize size, offset;

            GST_LOG_OBJECT(allocator,
                           "Dequeued capture buffer, length: %u bytesused: %u data_offset: %u",
                           group->planes[i].length, group->planes[i].bytesused,
                           group->planes[i].data_offset);

            offset = group->planes[i].data_offset;

            if (group->planes[i].bytesused > group->planes[i].data_offset)
            {
                size = group->planes[i].bytesused - group->planes[i].data_offset;
            }
            else
            {
                GST_WARNING_OBJECT(allocator,
                                   "V4L2 provided buffer has bytesused %" G_GUINT32_FORMAT
                                   " which is too small to include data_offset %" G_GUINT32_FORMAT,
                                   group->planes[i].bytesused, group->planes[i].data_offset);
                size = group->planes[i].bytesused;
                /* If this is the last buffer, bytesused is 0, so size would be 0 and
                 * cause an assertion failure in gst_memory_resize(); use the full
                 * plane length instead. */
                if ((group->buffer.flags & V4L2_BUF_FLAG_LAST) && (group->buffer.bytesused == 0))
                {
                    size = group->planes[i].length;
                }
            }

            if (G_LIKELY(size + offset <= group->mem[i]->maxsize))
                gst_memory_resize(group->mem[i], offset, size);
            else
            {
                GST_WARNING_OBJECT(allocator,
                                   "v4l2 provided buffer that is too big for the memory it was "
                                   "writing into. v4l2 claims %" G_GSIZE_FORMAT " bytes used but "
                                   "memory is only %" G_GSIZE_FORMAT "B. This is probably a driver "
                                   "bug.", size, group->mem[i]->maxsize);
                gst_memory_resize(group->mem[i], 0, group->mem[i]->maxsize);
            }
        }
    }

    /* Release the memory, possibly making it RW again */
    for (i = 0; i < group->n_mem; i++)
        gst_memory_unref(group->mem[i]);

    *group_out = group;
    return GST_FLOW_OK;

error:
    if (errno == EPIPE)
    {
        GST_DEBUG_OBJECT(allocator, "broken pipe signals last buffer");
        return GST_FLOW_EOS;
    }

    GST_ERROR_OBJECT(allocator, "failed dequeuing a %s buffer: %s",
                     memory_type_to_str(allocator->memory), g_strerror(errno));

    switch (errno)
    {
    case EAGAIN:
        GST_WARNING_OBJECT(allocator,
                           "Non-blocking I/O has been selected using O_NONBLOCK and"
                           " no buffer was in the outgoing queue.");
        break;
    case EINVAL:
        GST_ERROR_OBJECT(allocator,
                         "The buffer type is not supported, or the index is out of bounds, "
                         "or no buffers have been allocated yet, or the userptr "
                         "or length are invalid.");
        break;
    case ENOMEM:
        GST_ERROR_OBJECT(allocator,
                         "insufficient memory to enqueue a user pointer buffer");
        break;
    case EIO:
        GST_INFO_OBJECT(allocator,
                        "VIDIOC_DQBUF failed due to an internal error."
                        " Can also indicate temporary problems like signal loss."
                        " Note the driver might dequeue an (empty) buffer despite"
                        " returning an error, or even stop capturing.");
        /* have we de-queued a buffer? */
        if (!IS_QUEUED(buffer))
        {
            GST_DEBUG_OBJECT(allocator, "reenqueueing buffer");
            /* FIXME ... should we do something here? */
        }
        break;
    case EINTR:
        GST_WARNING_OBJECT(allocator, "could not sync on a buffer on device");
        break;
    default:
        GST_WARNING_OBJECT(allocator,
                           "Grabbing frame got interrupted unexpectedly. %d: %s.", errno,
                           g_strerror(errno));
        break;
    }

    return GST_FLOW_ERROR;
}

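/* Reset @group to its pre-import state according to the allocator's memory
 * mode (clear user pointers or DMABUF FDs; nothing to do for MMAP), then let
 * gst_aml_v4l2_allocator_reset_size() restore the plane sizes. */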
void
gst_aml_v4l2_allocator_reset_group(GstAmlV4l2Allocator *allocator,
                                   GstAmlV4l2MemoryGroup *group)
{
    switch (allocator->memory)
    {
    case V4L2_MEMORY_USERPTR:
        gst_aml_v4l2_allocator_clear_userptr(allocator, group);
        break;
    case V4L2_MEMORY_DMABUF:
        gst_aml_v4l2_allocator_clear_dmabufin(allocator, group);
        break;
    case V4L2_MEMORY_MMAP:
        break;
    default:
        g_assert_not_reached();
        break;
    }

    gst_aml_v4l2_allocator_reset_size(allocator, group);
xuesong.jiangae1548e2022-05-06 16:38:46 +08001600}