/* GStreamer
 * Copyright (C) 2008 David Schleef <ds@schleef.org>
 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
 *   Contact: Stefan Kost <stefan.kost@nokia.com>
 * Copyright (C) 2012 Collabora Ltd.
 *   Author : Edward Hervey <edward@collabora.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/**
 * SECTION:gstvideodecoder
 * @title: GstAmlVideoDecoder
 * @short_description: Base class for video decoders
 *
 * This base class is for video decoders turning encoded data into raw video
 * frames.
 *
 * The GstAmlVideoDecoder base class and derived subclasses should cooperate as
 * follows:
 *
 * ## Configuration
 *
 *   * Initially, GstAmlVideoDecoder calls @start when the decoder element
 *     is activated, which allows the subclass to perform any global setup.
 *
 *   * GstAmlVideoDecoder calls @set_format to inform the subclass of caps
 *     describing input video data that it is about to receive, possibly
 *     including configuration data. While unlikely, it might be called more
 *     than once if changing input parameters requires reconfiguration
 *     (a minimal sketch follows this section).
 *
 *   * Incoming data buffers are processed as needed, as described in Data
 *     Processing below.
 *
 *   * GstAmlVideoDecoder calls @stop at the end of all processing.
 *
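 * A minimal sketch of a @set_format implementation (the MyDec type, its
 * fields and the MY_DEC cast are hypothetical; error paths omitted):
 *
 * |[<!-- language="C" -->
 * static gboolean
 * my_dec_set_format (GstAmlVideoDecoder * dec, GstAmlVideoCodecState * state)
 * {
 *   MyDec *self = MY_DEC (dec);
 *
 *   // Keep a reference to the negotiated input state for later use
 *   if (self->input_state)
 *     gst_aml_video_codec_state_unref (self->input_state);
 *   self->input_state = gst_aml_video_codec_state_ref (state);
 *
 *   // (Re)configure the actual decoding library from state->caps and
 *   // state->codec_data here
 *   return TRUE;
 * }
 * ]|
 *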
 * ## Data processing
 *
 *   * The base class gathers input data, and optionally allows the subclass
 *     to parse this into subsequently manageable chunks, typically
 *     corresponding to and referred to as 'frames'.
 *
 *   * Each input frame is provided in turn to the subclass' @handle_frame
 *     callback.
 *   * When the subclass enables subframe mode with
 *     `gst_aml_video_decoder_set_subframe_mode`, the base class will provide
 *     the same input frame, with different input buffers, to the subclass'
 *     @handle_frame callback. During this call, the subclass needs to take
 *     ownership of the input buffer, as @GstAmlVideoCodecFrame.input_buffer
 *     will have been changed before the next subframe buffer is received.
 *     The subclass will call `gst_aml_video_decoder_have_last_subframe`
 *     when a new input frame can be created by the base class.
 *     Every subframe will share the same @GstAmlVideoCodecFrame.output_buffer
 *     to write the decoding result into. The subclass is responsible for
 *     protecting its access.
 *
 *   * If codec processing results in decoded data, the subclass should call
 *     @gst_aml_video_decoder_finish_frame to have decoded data pushed
 *     downstream (a sketch of a @handle_frame implementation follows this
 *     section). In subframe mode the subclass should call
 *     @gst_aml_video_decoder_finish_subframe until the last subframe, where it
 *     should call @gst_aml_video_decoder_finish_frame. The subclass can detect
 *     the last subframe using GST_VIDEO_BUFFER_FLAG_MARKER on buffers or
 *     using its own logic to collect the subframes.
 *     In case of decoding failure, the subclass must call
 *     @gst_aml_video_decoder_drop_frame or @gst_aml_video_decoder_drop_subframe,
 *     to allow the base class to do timestamp and offset tracking, and possibly
 *     to requeue the frame for a later attempt in the case of reverse playback.
 *
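 * A sketch of a @handle_frame implementation for the simple (non-subframe)
 * case; decode_to_buffer() is a hypothetical helper standing in for the
 * actual codec work:
 *
 * |[<!-- language="C" -->
 * static GstFlowReturn
 * my_dec_handle_frame (GstAmlVideoDecoder * dec, GstAmlVideoCodecFrame * frame)
 * {
 *   GstFlowReturn ret;
 *
 *   // Allocate an output buffer matching the negotiated output state
 *   ret = gst_aml_video_decoder_allocate_output_frame (dec, frame);
 *   if (ret != GST_FLOW_OK)
 *     return gst_aml_video_decoder_drop_frame (dec, frame);
 *
 *   // Hypothetical helper doing the actual decoding
 *   if (!decode_to_buffer (MY_DEC (dec), frame->input_buffer,
 *           frame->output_buffer)) {
 *     // Let the base class keep its timestamp/offset tracking consistent
 *     return gst_aml_video_decoder_drop_frame (dec, frame);
 *   }
 *
 *   return gst_aml_video_decoder_finish_frame (dec, frame);
 * }
 * ]|
 *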
 * ## Shutdown phase
 *
 *   * The GstAmlVideoDecoder class calls @stop to inform the subclass that
 *     data parsing will be stopped.
 *
 * ## Additional Notes
 *
 *   * Seeking/Flushing
 *
 *     * When the pipeline is seeked or otherwise flushed, the subclass is
 *       informed via a call to its @reset callback, with the hard parameter
 *       set to true. This indicates the subclass should drop any internal
 *       data queues and timestamps and prepare for a fresh set of buffers to
 *       arrive for parsing and decoding.
 *
 *   * End Of Stream
 *
 *     * At end-of-stream, the subclass @parse function may be called some
 *       final times with the at_eos parameter set to true, indicating that
 *       the element should not expect any more data to be arriving, and it
 *       should parse any remaining frames and call
 *       gst_aml_video_decoder_have_frame() if possible.
 *
 * The subclass is responsible for providing pad template caps for
 * source and sink pads. The pads need to be named "sink" and "src". It also
 * needs to provide information about the output caps, when they are known.
 * This may be when the base class calls the subclass' @set_format function,
 * though it might be during decoding, before calling
 * @gst_aml_video_decoder_finish_frame. This is done via
 * @gst_aml_video_decoder_set_output_state.
 *
 * The subclass is also responsible for providing (presentation) timestamps
 * (likely based on corresponding input ones). If that is not applicable
 * or possible, the base class provides limited framerate-based interpolation.
 *
 * Similarly, the base class provides some limited (legacy) seeking support
 * if specifically requested by the subclass, as full-fledged support
 * should rather be left to an upstream demuxer, parser or the like. This
 * simple approach caters for seeking and duration reporting using estimated
 * input bitrates. To enable it, a subclass should call
 * @gst_aml_video_decoder_set_estimate_rate to enable handling of incoming
 * byte-streams.
 *
 * The base class provides some support for reverse playback, in particular
 * in case incoming data is not packetized or upstream does not provide
 * fragments on keyframe boundaries. However, the subclass should then be
 * prepared for the parsing and frame processing stage to occur separately
 * (in normal forward processing, the latter immediately follows the former).
 * The subclass also needs to ensure the parsing stage properly marks
 * keyframes, unless it knows the upstream elements will do so properly for
 * incoming data.
 *
 * The bare minimum that a functional subclass needs to implement is:
 *
 *   * Provide pad templates
 *   * Inform the base class of output caps via
 *     @gst_aml_video_decoder_set_output_state
 *
 *   * Parse input data, if it is not considered packetized from upstream.
 *     Data will be provided to @parse, which should invoke
 *     @gst_aml_video_decoder_add_to_frame and @gst_aml_video_decoder_have_frame
 *     to separate the data belonging to each video frame (a schematic sketch
 *     follows at the end of this list).
 *
 *   * Accept data in @handle_frame and provide decoded results to
 *     @gst_aml_video_decoder_finish_frame, or call @gst_aml_video_decoder_drop_frame.
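 *
 * A schematic @parse implementation for a byte-stream input, assuming a
 * hypothetical find_frame_size() helper that returns the size in bytes of the
 * next complete frame in the adapter (or 0 if more data is needed), and
 * assuming this fork keeps the upstream GST_VIDEO_DECODER_FLOW_NEED_DATA
 * constant under the GST_AML_ prefix:
 *
 * |[<!-- language="C" -->
 * static GstFlowReturn
 * my_dec_parse (GstAmlVideoDecoder * dec, GstAmlVideoCodecFrame * frame,
 *     GstAdapter * adapter, gboolean at_eos)
 * {
 *   int size = find_frame_size (adapter, at_eos);
 *
 *   if (size == 0)
 *     return GST_AML_VIDEO_DECODER_FLOW_NEED_DATA;
 *
 *   // Move the frame's bytes out of the adapter into the current frame ...
 *   gst_aml_video_decoder_add_to_frame (dec, size);
 *   // ... and hand the completed frame over for decoding
 *   return gst_aml_video_decoder_have_frame (dec);
 * }
 * ]|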
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

/* TODO
 *
 * * Add a flag/boolean for I-frame-only/image decoders so we can do extra
 *   features, like applying QoS on input (as opposed to after the frame is
 *   decoded).
 * * Add a flag/boolean for decoders that require keyframes, so the base
 *   class can automatically discard non-keyframes before one has arrived
 * * Detect reordered frame/timestamps and fix the pts/dts
 * * Support for GstIndex (or shall we not care ?)
 * * Calculate actual latency based on input/output timestamp/frame_number
 *   and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
 * * Emit latency message when it changes
 *
 */

/* Implementation notes:
 * The Video Decoder base class operates in 2 primary processing modes, depending
 * on whether forward or reverse playback is requested.
 *
 * Forward playback:
 *   * Incoming buffer -> @parse() -> add_to_frame()/have_frame() ->
 *     handle_frame() -> push downstream
 *
 * Reverse playback is more complicated, since it involves gathering incoming
 * data regions as we loop backwards through the upstream data. The processing
 * concept (using incoming buffers as containing one frame each to simplify
 * things) is:
 *
 * Upstream data we want to play:
 *  Buffer encoded order:  1  2  3  4  5  6  7  8  9  EOS
 *  Keyframe flag:            K        K
 *  Groupings:             AAAAAAA  BBBBBBB  CCCCCCC
 *
 * Input:
 *  Buffer reception order:  7  8  9  4  5  6  1  2  3  EOS
 *  Keyframe flag:                       K        K
 *  Discont flag:            D        D        D
 *
 * - Each Discont marks a discont in the decoding order.
 * - The keyframes mark where we can start decoding.
 *
 * Initially, we prepend incoming buffers to the gather queue. Whenever the
 * discont flag is set on an incoming buffer, the gather queue is flushed out
 * before the new buffer is collected.
 *
 * The above data will be accumulated in the gather queue like this:
 *
 *   gather queue:    9  8  7
 *                          D
 *
 * When buffer 4 is received (with a DISCONT), we flush the gather queue like
 * this:
 *
 *   while (gather)
 *     take head of queue and prepend to parse queue (this reverses the
 *     sequence, so parse queue is 7 -> 8 -> 9)
 *
 *   Next, we process the parse queue, which now contains all un-parsed packets
 *   (including any leftover ones from the previous decode section)
 *
 *   for each buffer now in the parse queue:
 *     Call the subclass parse function, prepending each resulting frame to
 *     the parse_gather queue. Buffers which precede the first one that
 *     produces a parsed frame are retained in the parse queue for
 *     re-processing on the next cycle of parsing.
 *
 *   The parse_gather queue now contains frame objects ready for decoding,
 *   in reverse order.
 *   parse_gather: 9 -> 8 -> 7
 *
 *   while (parse_gather)
 *     Take the head of the queue and prepend it to the decode queue
 *     If the frame was a keyframe, process the decode queue
 *   decode is now 7-8-9
 *
 *   Processing the decode queue results in frames with attached output buffers
 *   stored in the 'output_queue' ready for outputting in reverse order.
 *
 * After we flushed the gather queue and parsed it, we add 4 to the (now empty)
 * gather queue. We get the following situation:
 *
 *   gather queue:    4
 *   decode queue:    7  8  9
 *
 * After we received 5 (Keyframe) and 6:
 *
 *   gather queue:    6  5  4
 *   decode queue:    7  8  9
 *
 * When we receive 1 (DISCONT) which triggers a flush of the gather queue:
 *
 *   Copy head of the gather queue (6) to decode queue:
 *
 *     gather queue:    5  4
 *     decode queue:    6  7  8  9
 *
 *   Copy head of the gather queue (5) to decode queue. This is a keyframe so we
 *   can start decoding.
 *
 *     gather queue:    4
 *     decode queue:    5  6  7  8  9
 *
 *   Decode frames in decode queue, store raw decoded data in output queue; we
 *   can take the head of the decode queue and prepend the decoded result in the
 *   output queue:
 *
 *     gather queue:    4
 *     decode queue:
 *     output queue:    9  8  7  6  5
 *
 *   Now output all the frames in the output queue, picking a frame from the
 *   head of the queue.
 *
 *   Copy head of the gather queue (4) to decode queue; we flushed the gather
 *   queue and can now store the input buffer in the gather queue:
 *
 *     gather queue:    1
 *     decode queue:    4
 *
 * When we receive EOS, the queue looks like:
 *
 *   gather queue:    3  2  1
 *   decode queue:    4
 *
 * Fill decode queue, first keyframe we copy is 2:
 *
 *   gather queue:    1
 *   decode queue:    2  3  4
 *
 * Decoded output:
 *
 *   gather queue:    1
 *   decode queue:
 *   output queue:    4  3  2
 *
 * Leftover buffer 1 cannot be decoded and must be discarded.
 */

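/* A schematic illustration (not compiled code) of the gather -> parse flush
 * described above; `gather` and `parse` are the GLists kept in
 * GstAmlVideoDecoderPrivate:
 *
 *   while (priv->gather) {
 *     GstBuffer *buf = priv->gather->data;
 *     priv->gather = g_list_delete_link (priv->gather, priv->gather);
 *     // prepending reverses the list again, restoring decoding order
 *     priv->parse = g_list_prepend (priv->parse, buf);
 *   }
 */
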
#include "gstamlvideodecoder.h"
#include "gstamlvideoutils.h"
#include "gstamlvideoutilsprivate.h"

#include <string.h>
#include <gst/video/video.h>
#include <gst/video/video-event.h>
#include <gst/video/gstvideopool.h>
#include <gst/video/gstvideometa.h>
#include <gst/gstdrmbufferpool.h>
#include <gst/allocators/gstdmabuf.h>

GST_DEBUG_CATEGORY (videodecoder_debug);
#define GST_CAT_DEFAULT videodecoder_debug

/* properties */
#define DEFAULT_QOS TRUE
#define DEFAULT_MAX_ERRORS GST_AML_VIDEO_DECODER_MAX_ERRORS
#define DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL 0
#define DEFAULT_DISCARD_CORRUPTED_FRAMES FALSE
#define DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS FALSE
#define DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS (GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT | GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT)
#define DEFAULT_LOCAL_BUF_POOL FALSE

/* Used for request_sync_point_frame_number. These are out of range for the
 * frame numbers and can be given special meaning (the cast keeps the + 1
 * from wrapping to 0 in 32-bit unsigned arithmetic) */
#define REQUEST_SYNC_POINT_PENDING ((guint64) G_MAXUINT + 1)
#define REQUEST_SYNC_POINT_UNSET G_MAXUINT64

enum
{
  PROP_0,
  PROP_QOS,
  PROP_MAX_ERRORS,
  PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
  PROP_DISCARD_CORRUPTED_FRAMES,
  PROP_AUTOMATIC_REQUEST_SYNC_POINTS,
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
  PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
#endif
  PROP_LOCAL_BUF_POOL,
};

struct _GstAmlVideoDecoderPrivate
{
  /* FIXME introduce a context ? */

  GstBufferPool *pool;
  GstAllocator *allocator;
  GstAllocationParams params;

  /* parse tracking */
  /* input data */
  GstAdapter *input_adapter;
  /* assembles current frame */
  GstAdapter *output_adapter;

  /* Whether we attempt to convert newsegment from bytes to
   * time using a bitrate estimation */
  gboolean do_estimate_rate;

  /* Whether input is considered packetized or not */
  gboolean packetized;

  /* whether input is considered as subframes */
  gboolean subframe_mode;

  /* Error handling */
  gint max_errors;
  gint error_count;
  gboolean had_output_data;
  gboolean had_input_data;

  gboolean needs_format;
  /* whether input_segment and output_segment are identical */
  gboolean in_out_segment_sync;

  /* TRUE if we have an active set of instant rate flags */
  gboolean decode_flags_override;
  GstSegmentFlags decode_flags;

  /* ... being tracked here;
   * only available during parsing or when doing subframe decoding */
  GstAmlVideoCodecFrame *current_frame;
  /* events that should apply to the current frame */
  /* FIXME 2.0: Use a GQueue or similar, see GstAmlVideoCodecFrame::events */
  GList *current_frame_events;
  /* events that should be pushed before the next frame */
  /* FIXME 2.0: Use a GQueue or similar, see GstAmlVideoCodecFrame::events */
  GList *pending_events;

  /* relative offset of input data */
  guint64 input_offset;
  /* relative offset of frame */
  guint64 frame_offset;
  /* tracking ts and offsets */
  GQueue timestamps;

  /* last outgoing ts */
  GstClockTime last_timestamp_out;
  /* incoming pts - dts */
  GstClockTime pts_delta;
  gboolean reordered_output;

  /* FIXME: Consider using a GQueue or other better fitting data structure */
  /* reverse playback */
  /* collect input */
  GList *gather;
  /* to-be-parsed */
  GList *parse;
  /* collected parsed frames */
  GList *parse_gather;
  /* frames to be handled == decoded */
  GList *decode;
  /* collected output - of buffer objects, not frames */
  GList *output_queued;


  /* base_picture_number is the picture number of the reference picture */
  guint64 base_picture_number;
  /* combine with base_picture_number, framerate and calcs to yield (presentation) ts */
  GstClockTime base_timestamp;

  /* Properties */
  GstClockTime min_force_key_unit_interval;
  gboolean discard_corrupted_frames;

  /* Key unit related state */
  gboolean needs_sync_point;
  GstAmlVideoDecoderRequestSyncPointFlags request_sync_point_flags;
  guint64 request_sync_point_frame_number;
  GstClockTime last_force_key_unit_time;
  /* -1 if we saw no sync point yet */
  guint64 distance_from_sync;

  gboolean automatic_request_sync_points;
  GstAmlVideoDecoderRequestSyncPointFlags automatic_request_sync_point_flags;
  gboolean local_buf_pool;

  guint32 system_frame_number;
  guint32 decode_frame_number;

  GQueue frames;                /* Protected with OBJECT_LOCK */
  GstAmlVideoCodecState *input_state;
  GstAmlVideoCodecState *output_state;  /* OBJECT_LOCK and STREAM_LOCK */
  gboolean output_state_changed;

  /* QoS properties */
  gboolean do_qos;
  gdouble proportion;           /* OBJECT_LOCK */
  GstClockTime earliest_time;   /* OBJECT_LOCK */
  GstClockTime qos_frame_duration;      /* OBJECT_LOCK */
  gboolean discont;
  /* qos messages: frames dropped/processed */
  guint dropped;
  guint processed;

  /* Outgoing byte size ? */
  gint64 bytes_out;
  gint64 time;

  gint64 min_latency;
  gint64 max_latency;

  /* Tracks whether the latency message was posted at least once */
  gboolean posted_latency_msg;

  /* upstream stream tags (global tags are passed through as-is) */
  GstTagList *upstream_tags;

  /* subclass tags */
  GstTagList *tags;
  GstTagMergeMode tags_merge_mode;

  gboolean tags_changed;

  /* flags */
  gboolean use_default_pad_acceptcaps;

#ifndef GST_DISABLE_DEBUG
  /* Diagnostic time for reporting the time
   * from flush to first output */
  GstClockTime last_reset_time;
#endif
};

static GstElementClass *parent_class = NULL;
static gint private_offset = 0;

/* cached quark to avoid contention on the global quark table lock */
#define META_TAG_VIDEO meta_tag_video_quark
static GQuark meta_tag_video_quark;

static void gst_aml_video_decoder_class_init (GstAmlVideoDecoderClass * klass);
static void gst_aml_video_decoder_init (GstAmlVideoDecoder * dec,
    GstAmlVideoDecoderClass * klass);

static void gst_aml_video_decoder_finalize (GObject * object);
static void gst_aml_video_decoder_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec);
static void gst_aml_video_decoder_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec);

static gboolean gst_aml_video_decoder_setcaps (GstAmlVideoDecoder * dec,
    GstCaps * caps);
static gboolean gst_aml_video_decoder_sink_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static gboolean gst_aml_video_decoder_src_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static GstFlowReturn gst_aml_video_decoder_chain (GstPad * pad, GstObject * parent,
    GstBuffer * buf);
static gboolean gst_aml_video_decoder_sink_query (GstPad * pad, GstObject * parent,
    GstQuery * query);
static GstStateChangeReturn gst_aml_video_decoder_change_state (GstElement *
    element, GstStateChange transition);
static gboolean gst_aml_video_decoder_src_query (GstPad * pad, GstObject * parent,
    GstQuery * query);
static void gst_aml_video_decoder_reset (GstAmlVideoDecoder * decoder, gboolean full,
    gboolean flush_hard);

static GstFlowReturn gst_aml_video_decoder_decode_frame (GstAmlVideoDecoder * decoder,
    GstAmlVideoCodecFrame * frame);

static void gst_aml_video_decoder_push_event_list (GstAmlVideoDecoder * decoder,
    GList * events);
static GstClockTime gst_aml_video_decoder_get_frame_duration (GstAmlVideoDecoder *
    decoder, GstAmlVideoCodecFrame * frame);
static GstAmlVideoCodecFrame *gst_aml_video_decoder_new_frame (GstAmlVideoDecoder *
    decoder);
static GstFlowReturn gst_aml_video_decoder_clip_and_push_buf (GstAmlVideoDecoder *
    decoder, GstBuffer * buf);
static GstFlowReturn gst_aml_video_decoder_flush_parse (GstAmlVideoDecoder * dec,
    gboolean at_eos);

static void gst_aml_video_decoder_clear_queues (GstAmlVideoDecoder * dec);

static gboolean gst_aml_video_decoder_sink_event_default (GstAmlVideoDecoder * decoder,
    GstEvent * event);
static gboolean gst_aml_video_decoder_src_event_default (GstAmlVideoDecoder * decoder,
    GstEvent * event);
static gboolean gst_aml_video_decoder_decide_allocation_default (GstAmlVideoDecoder *
    decoder, GstQuery * query);
static gboolean gst_aml_video_decoder_propose_allocation_default (GstAmlVideoDecoder *
    decoder, GstQuery * query);
static gboolean gst_aml_video_decoder_negotiate_default (GstAmlVideoDecoder * decoder);
static GstFlowReturn gst_aml_video_decoder_parse_available (GstAmlVideoDecoder * dec,
    gboolean at_eos, gboolean new_buffer);
static gboolean gst_aml_video_decoder_negotiate_unlocked (GstAmlVideoDecoder *
    decoder);
static gboolean gst_aml_video_decoder_sink_query_default (GstAmlVideoDecoder * decoder,
    GstQuery * query);
static gboolean gst_aml_video_decoder_src_query_default (GstAmlVideoDecoder * decoder,
    GstQuery * query);

static gboolean gst_aml_video_decoder_transform_meta_default (GstAmlVideoDecoder *
    decoder, GstAmlVideoCodecFrame * frame, GstMeta * meta);

static gboolean gst_aml_video_decoder_handle_missing_data_default (GstAmlVideoDecoder *
    decoder, GstClockTime timestamp, GstClockTime duration);

static void gst_aml_video_decoder_copy_metas (GstAmlVideoDecoder * decoder,
    GstAmlVideoCodecFrame * frame, GstBuffer * src_buffer,
    GstBuffer * dest_buffer);

static void gst_aml_video_decoder_request_sync_point_internal (GstAmlVideoDecoder *
    dec, GstClockTime deadline, GstAmlVideoDecoderRequestSyncPointFlags flags);

/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
 * method to get to the padtemplates */
GType
gst_aml_video_decoder_get_type (void)
{
  static gsize type = 0;

  if (g_once_init_enter (&type)) {
    GType _type;
    static const GTypeInfo info = {
      sizeof (GstAmlVideoDecoderClass),
      NULL,
      NULL,
      (GClassInitFunc) gst_aml_video_decoder_class_init,
      NULL,
      NULL,
      sizeof (GstAmlVideoDecoder),
      0,
      (GInstanceInitFunc) gst_aml_video_decoder_init,
    };

    _type = g_type_register_static (GST_TYPE_ELEMENT,
        "GstAmlVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);

    private_offset =
        g_type_add_instance_private (_type, sizeof (GstAmlVideoDecoderPrivate));

    g_once_init_leave (&type, _type);
  }
  return type;
}

static inline GstAmlVideoDecoderPrivate *
gst_aml_video_decoder_get_instance_private (GstAmlVideoDecoder * self)
{
  return (G_STRUCT_MEMBER_P (self, private_offset));
}

static void
gst_aml_video_decoder_class_init (GstAmlVideoDecoderClass * klass)
{
  GObjectClass *gobject_class;
  GstElementClass *gstelement_class;

  gobject_class = G_OBJECT_CLASS (klass);
  gstelement_class = GST_ELEMENT_CLASS (klass);

  GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "amlvideodecoder", 0,
      "Base Video Decoder");

  parent_class = g_type_class_peek_parent (klass);

  if (private_offset != 0)
    g_type_class_adjust_private_offset (klass, &private_offset);

  gobject_class->finalize = gst_aml_video_decoder_finalize;
  gobject_class->get_property = gst_aml_video_decoder_get_property;
  gobject_class->set_property = gst_aml_video_decoder_set_property;

  gstelement_class->change_state =
      GST_DEBUG_FUNCPTR (gst_aml_video_decoder_change_state);

  klass->sink_event = gst_aml_video_decoder_sink_event_default;
  klass->src_event = gst_aml_video_decoder_src_event_default;
  klass->decide_allocation = gst_aml_video_decoder_decide_allocation_default;
  klass->propose_allocation = gst_aml_video_decoder_propose_allocation_default;
  klass->negotiate = gst_aml_video_decoder_negotiate_default;
  klass->sink_query = gst_aml_video_decoder_sink_query_default;
  klass->src_query = gst_aml_video_decoder_src_query_default;
  klass->transform_meta = gst_aml_video_decoder_transform_meta_default;
  klass->handle_missing_data = gst_aml_video_decoder_handle_missing_data_default;

  /**
   * GstAmlVideoDecoder:qos:
   *
   * If set to %TRUE the decoder will handle QoS events received
   * from downstream elements.
   * This includes dropping output frames which are detected as late
   * using the metrics reported by those events.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_QOS,
      g_param_spec_boolean ("qos", "Quality of Service",
          "Handle Quality-of-Service events from downstream",
          DEFAULT_QOS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAmlVideoDecoder:max-errors:
   *
   * Maximum number of tolerated consecutive decode errors. See
   * gst_aml_video_decoder_set_max_errors() for more details.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_MAX_ERRORS,
      g_param_spec_int ("max-errors", "Max errors",
          "Max consecutive decoder errors before returning flow error",
          -1, G_MAXINT, DEFAULT_MAX_ERRORS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAmlVideoDecoder:min-force-key-unit-interval:
   *
   * Minimum interval between force-key-unit events sent upstream by the
   * decoder. Setting this to 0 will cause every event to be handled, setting
   * this to %GST_CLOCK_TIME_NONE will cause every event to be ignored.
   *
   * See gst_video_event_new_upstream_force_key_unit() for more details about
   * force-key-unit events.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
      g_param_spec_uint64 ("min-force-key-unit-interval",
          "Minimum Force Keyunit Interval",
          "Minimum interval between force-keyunit requests in nanoseconds", 0,
          G_MAXUINT64, DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAmlVideoDecoder:discard-corrupted-frames:
   *
   * If set to %TRUE the decoder will discard frames that are marked as
   * corrupted instead of outputting them.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class, PROP_DISCARD_CORRUPTED_FRAMES,
      g_param_spec_boolean ("discard-corrupted-frames",
          "Discard Corrupted Frames",
          "Discard frames marked as corrupted instead of outputting them",
          DEFAULT_DISCARD_CORRUPTED_FRAMES,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAmlVideoDecoder:automatic-request-sync-points:
   *
   * If set to %TRUE the decoder will automatically request sync points when
   * it seems like a good idea, e.g. if the first frames are not key frames or
   * if packet loss was reported by upstream.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_AUTOMATIC_REQUEST_SYNC_POINTS,
      g_param_spec_boolean ("automatic-request-sync-points",
          "Automatic Request Sync Points",
          "Automatically request sync points when it would be useful",
          DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAmlVideoDecoder:automatic-request-sync-point-flags:
   *
   * GstAmlVideoDecoderRequestSyncPointFlags to use for the automatically
   * requested sync points if `automatic-request-sync-points` is enabled.
   *
   * Since: 1.20
   */
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
  g_object_class_install_property (gobject_class,
      PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
      g_param_spec_flags ("automatic-request-sync-point-flags",
          "Automatic Request Sync Point Flags",
          "Flags to use when automatically requesting sync points",
          GST_TYPE_VIDEO_DECODER_REQUEST_SYNC_POINT_FLAGS,
          DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
#endif

  /**
   * GstAmlVideoDecoder:local-buffer-pool:
   *
   * If set to %TRUE the decoder will create a drmbufferpool itself
   * instead of querying it from downstream.
   */
  g_object_class_install_property (gobject_class,
      PROP_LOCAL_BUF_POOL,
      g_param_spec_boolean ("local-buffer-pool",
          "Create DrmBufferPool when needed",
          "Create your own drmbufferpool instead of querying it from downstream",
          DEFAULT_LOCAL_BUF_POOL,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  meta_tag_video_quark = g_quark_from_static_string (GST_META_TAG_VIDEO_STR);
}

static void
gst_aml_video_decoder_init (GstAmlVideoDecoder * decoder, GstAmlVideoDecoderClass * klass)
{
  GstPadTemplate *pad_template;
  GstPad *pad;

  GST_DEBUG_OBJECT (decoder, "gst_aml_video_decoder_init");

  decoder->svp = FALSE;

  decoder->priv = gst_aml_video_decoder_get_instance_private (decoder);

  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
  g_return_if_fail (pad_template != NULL);

  decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");

  gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_aml_video_decoder_chain));
  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_aml_video_decoder_sink_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_aml_video_decoder_sink_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);

  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
  g_return_if_fail (pad_template != NULL);

  decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");

  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_aml_video_decoder_src_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_aml_video_decoder_src_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);

  gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
  gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);

  g_rec_mutex_init (&decoder->stream_lock);

  decoder->priv->input_adapter = gst_adapter_new ();
  decoder->priv->output_adapter = gst_adapter_new ();
  decoder->priv->packetized = TRUE;
  decoder->priv->needs_format = FALSE;

  g_queue_init (&decoder->priv->frames);
  g_queue_init (&decoder->priv->timestamps);

  /* properties */
  decoder->priv->do_qos = DEFAULT_QOS;
  decoder->priv->max_errors = GST_AML_VIDEO_DECODER_MAX_ERRORS;

  decoder->priv->min_latency = 0;
  decoder->priv->max_latency = 0;

  decoder->priv->automatic_request_sync_points =
      DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS;
  decoder->priv->automatic_request_sync_point_flags =
      DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS;

  decoder->priv->local_buf_pool = DEFAULT_LOCAL_BUF_POOL;

  gst_aml_video_decoder_reset (decoder, TRUE, TRUE);
}

static GstAmlVideoCodecState *
_new_input_state (GstCaps * caps)
{
  GstAmlVideoCodecState *state;
  GstStructure *structure;
  const GValue *codec_data;

  state = g_slice_new0 (GstAmlVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
    goto parse_fail;
  state->caps = gst_caps_ref (caps);

  structure = gst_caps_get_structure (caps, 0);

  codec_data = gst_structure_get_value (structure, "codec_data");
  if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
    state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));

  return state;

parse_fail:
  {
    g_slice_free (GstAmlVideoCodecState, state);
    return NULL;
  }
}

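/* For reference, the input caps parsed above typically look like this
 * (illustrative values only, not tied to any particular stream):
 *
 *   video/x-h264, stream-format=(string)avc, alignment=(string)au,
 *       width=(int)1920, height=(int)1080, codec_data=(buffer)014d40...
 *
 * The optional codec_data buffer is extracted into
 * GstAmlVideoCodecState.codec_data for the subclass to use. */
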
static GstAmlVideoCodecState *
_new_output_state (GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode,
    guint width, guint height, GstAmlVideoCodecState * reference,
    gboolean copy_interlace_mode)
{
  GstAmlVideoCodecState *state;

  state = g_slice_new0 (GstAmlVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (!gst_video_info_set_interlaced_format (&state->info, fmt, interlace_mode,
          width, height)) {
    g_slice_free (GstAmlVideoCodecState, state);
    return NULL;
  }

  if (reference) {
    GstVideoInfo *tgt, *ref;

    tgt = &state->info;
    ref = &reference->info;

    /* Copy over extra fields from reference state */
    if (copy_interlace_mode)
      tgt->interlace_mode = ref->interlace_mode;
    tgt->flags = ref->flags;
    tgt->chroma_site = ref->chroma_site;
    tgt->colorimetry = ref->colorimetry;
    GST_DEBUG ("reference par %d/%d fps %d/%d",
        ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
    tgt->par_n = ref->par_n;
    tgt->par_d = ref->par_d;
    tgt->fps_n = ref->fps_n;
    tgt->fps_d = ref->fps_d;
    tgt->views = ref->views;

    GST_VIDEO_INFO_FIELD_ORDER (tgt) = GST_VIDEO_INFO_FIELD_ORDER (ref);

    if (GST_VIDEO_INFO_MULTIVIEW_MODE (ref) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) =
          GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
    } else {
      /* Default to MONO, overridden as needed by sub-classes */
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_MULTIVIEW_MODE_MONO;
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;
    }
  }

  GST_DEBUG ("output state par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);

  return state;
}

static gboolean
gst_aml_video_decoder_setcaps (GstAmlVideoDecoder * decoder, GstCaps * caps)
{
  GstAmlVideoDecoderClass *decoder_class;
  GstAmlVideoCodecState *state;
  gboolean ret = TRUE;

  decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);

  GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);

  GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);

  if (decoder->priv->input_state) {
    GST_DEBUG_OBJECT (decoder,
        "Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
        decoder->priv->input_state->caps, caps);
    if (gst_caps_is_equal (decoder->priv->input_state->caps, caps))
      goto caps_not_changed;
  }

  state = _new_input_state (caps);

  if (G_UNLIKELY (state == NULL))
    goto parse_fail;

  if (decoder_class->set_format)
    ret = decoder_class->set_format (decoder, state);

  if (!ret)
    goto refused_format;

  if (decoder->priv->input_state)
    gst_aml_video_codec_state_unref (decoder->priv->input_state);
  decoder->priv->input_state = state;

  GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;

caps_not_changed:
  {
    GST_DEBUG_OBJECT (decoder, "Caps did not change - ignore");
    GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    return TRUE;
  }

  /* ERRORS */
parse_fail:
  {
    GST_WARNING_OBJECT (decoder, "Failed to parse caps");
    GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    return FALSE;
  }

refused_format:
  {
    GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    GST_WARNING_OBJECT (decoder, "Subclass refused caps");
    gst_aml_video_codec_state_unref (state);
    return FALSE;
  }
}

static void
gst_aml_video_decoder_finalize (GObject * object)
{
  GstAmlVideoDecoder *decoder;

  decoder = GST_AML_VIDEO_DECODER (object);

  GST_DEBUG_OBJECT (object, "finalize");

  g_rec_mutex_clear (&decoder->stream_lock);

  if (decoder->priv->input_adapter) {
    g_object_unref (decoder->priv->input_adapter);
    decoder->priv->input_adapter = NULL;
  }
  if (decoder->priv->output_adapter) {
    g_object_unref (decoder->priv->output_adapter);
    decoder->priv->output_adapter = NULL;
  }

  if (decoder->priv->input_state)
    gst_aml_video_codec_state_unref (decoder->priv->input_state);
  if (decoder->priv->output_state)
    gst_aml_video_codec_state_unref (decoder->priv->output_state);

  if (decoder->priv->pool) {
    gst_object_unref (decoder->priv->pool);
    decoder->priv->pool = NULL;
  }

  if (decoder->priv->allocator) {
    gst_object_unref (decoder->priv->allocator);
    decoder->priv->allocator = NULL;
  }

  G_OBJECT_CLASS (parent_class)->finalize (object);
}

static void
gst_aml_video_decoder_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec)
{
  GstAmlVideoDecoder *dec = GST_AML_VIDEO_DECODER (object);
  GstAmlVideoDecoderPrivate *priv = dec->priv;

  switch (property_id) {
    case PROP_QOS:
      g_value_set_boolean (value, priv->do_qos);
      break;
    case PROP_MAX_ERRORS:
      g_value_set_int (value, gst_aml_video_decoder_get_max_errors (dec));
      break;
    case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
      g_value_set_uint64 (value, priv->min_force_key_unit_interval);
      break;
    case PROP_DISCARD_CORRUPTED_FRAMES:
      g_value_set_boolean (value, priv->discard_corrupted_frames);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINTS:
      g_value_set_boolean (value, priv->automatic_request_sync_points);
      break;
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
    case PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS:
      g_value_set_flags (value, priv->automatic_request_sync_point_flags);
      break;
#endif
    case PROP_LOCAL_BUF_POOL:
      g_value_set_boolean (value, priv->local_buf_pool);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}

static void
gst_aml_video_decoder_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec)
{
  GstAmlVideoDecoder *dec = GST_AML_VIDEO_DECODER (object);
  GstAmlVideoDecoderPrivate *priv = dec->priv;

  switch (property_id) {
    case PROP_QOS:
      priv->do_qos = g_value_get_boolean (value);
      break;
    case PROP_MAX_ERRORS:
      gst_aml_video_decoder_set_max_errors (dec, g_value_get_int (value));
      break;
    case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
      priv->min_force_key_unit_interval = g_value_get_uint64 (value);
      break;
    case PROP_DISCARD_CORRUPTED_FRAMES:
      priv->discard_corrupted_frames = g_value_get_boolean (value);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINTS:
      priv->automatic_request_sync_points = g_value_get_boolean (value);
      break;
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
    case PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS:
      priv->automatic_request_sync_point_flags = g_value_get_flags (value);
      break;
#endif
    case PROP_LOCAL_BUF_POOL:
      priv->local_buf_pool = g_value_get_boolean (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}

/* hard == FLUSH, otherwise discont */
static GstFlowReturn
gst_aml_video_decoder_flush (GstAmlVideoDecoder * dec, gboolean hard)
{
  GstAmlVideoDecoderClass *klass = GST_AML_VIDEO_DECODER_GET_CLASS (dec);
  GstFlowReturn ret = GST_FLOW_OK;

  GST_LOG_OBJECT (dec, "flush hard %d", hard);

  /* Inform subclass */
  if (klass->reset) {
    GST_FIXME_OBJECT (dec, "GstAmlVideoDecoder::reset() is deprecated");
    klass->reset (dec, hard);
  }

  if (klass->flush)
    klass->flush (dec);

  /* and get (re)set for the sequel */
  gst_aml_video_decoder_reset (dec, FALSE, hard);

  return ret;
}

static GstEvent *
gst_aml_video_decoder_create_merged_tags_event (GstAmlVideoDecoder * dec)
{
  GstTagList *merged_tags;

  GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
  GST_LOG_OBJECT (dec, "decoder  : %" GST_PTR_FORMAT, dec->priv->tags);
  GST_LOG_OBJECT (dec, "mode     : %d", dec->priv->tags_merge_mode);

  merged_tags =
      gst_tag_list_merge (dec->priv->upstream_tags, dec->priv->tags,
      dec->priv->tags_merge_mode);

  GST_DEBUG_OBJECT (dec, "merged   : %" GST_PTR_FORMAT, merged_tags);

  if (merged_tags == NULL)
    return NULL;

  if (gst_tag_list_is_empty (merged_tags)) {
    gst_tag_list_unref (merged_tags);
    return NULL;
  }

  return gst_event_new_tag (merged_tags);
}

static gboolean
gst_aml_video_decoder_push_event (GstAmlVideoDecoder * decoder, GstEvent * event)
{
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEGMENT:
    {
      GstSegment segment;

      gst_event_copy_segment (event, &segment);

      GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);

      if (segment.format != GST_FORMAT_TIME) {
        GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
        break;
      }

      GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
      decoder->output_segment = segment;
      decoder->priv->in_out_segment_sync =
          gst_segment_is_equal (&decoder->input_segment, &segment);
      decoder->priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
      decoder->priv->earliest_time = GST_CLOCK_TIME_NONE;
      GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      break;
    }
    default:
      break;
  }

  GST_DEBUG_OBJECT (decoder, "pushing event %s",
      gst_event_type_get_name (GST_EVENT_TYPE (event)));

  return gst_pad_push_event (decoder->srcpad, event);
}

static GstFlowReturn
gst_aml_video_decoder_parse_available (GstAmlVideoDecoder * dec, gboolean at_eos,
    gboolean new_buffer)
{
  GstAmlVideoDecoderClass *decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (dec);
  GstAmlVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;
  gsize was_available, available;
  guint inactive = 0;

  available = gst_adapter_available (priv->input_adapter);

  while (available || new_buffer) {
    new_buffer = FALSE;
    /* current frame may have been parsed and handled,
     * so we need to set up a new one when asking subclass to parse */
    if (priv->current_frame == NULL)
      priv->current_frame = gst_aml_video_decoder_new_frame (dec);

    was_available = available;
    ret = decoder_class->parse (dec, priv->current_frame,
        priv->input_adapter, at_eos);
    if (ret != GST_FLOW_OK)
      break;

    /* if the subclass returned success (GST_FLOW_OK), it is expected
     * to have collected and submitted a frame, i.e. it should have
     * called gst_aml_video_decoder_have_frame(), or at least consumed a
     * few bytes through gst_aml_video_decoder_add_to_frame().
     *
     * Otherwise, this is an implementation bug, and we error out
     * after 2 failed attempts */
    available = gst_adapter_available (priv->input_adapter);
    if (!priv->current_frame || available != was_available)
      inactive = 0;
    else if (++inactive == 2)
      goto error_inactive;
  }

  return ret;

  /* ERRORS */
error_inactive:
  {
    GST_ERROR_OBJECT (dec, "Failed to consume data. Error in subclass?");
    return GST_FLOW_ERROR;
  }
}

/* This function has to be called with the stream lock taken. */
static GstFlowReturn
gst_aml_video_decoder_drain_out (GstAmlVideoDecoder * dec, gboolean at_eos)
{
  GstAmlVideoDecoderClass *decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (dec);
  GstAmlVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;

  if (dec->input_segment.rate > 0.0) {
    /* Forward mode, if unpacketized, give the child class
     * a final chance to flush out packets */
    if (!priv->packetized) {
      ret = gst_aml_video_decoder_parse_available (dec, TRUE, FALSE);
    }

    if (at_eos) {
      if (decoder_class->finish)
        ret = decoder_class->finish (dec);
    } else {
      if (decoder_class->drain) {
        ret = decoder_class->drain (dec);
      } else {
        GST_FIXME_OBJECT (dec, "Sub-class should implement drain()");
      }
    }
  } else {
    /* Reverse playback mode */
    ret = gst_aml_video_decoder_flush_parse (dec, TRUE);
  }

  return ret;
}

static GList *
_flush_events (GstPad * pad, GList * events)
{
  GList *tmp;

  for (tmp = events; tmp; tmp = tmp->next) {
    if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
        GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
        GST_EVENT_IS_STICKY (tmp->data)) {
      gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
    }
    gst_event_unref (tmp->data);
  }
  g_list_free (events);

  return NULL;
}

/* Must be called holding the GST_AML_VIDEO_DECODER_STREAM_LOCK */
static gboolean
gst_aml_video_decoder_negotiate_default_caps (GstAmlVideoDecoder * decoder)
{
  GstCaps *caps, *templcaps;
  GstAmlVideoCodecState *state;
  GstVideoInfo info;
  gint i;
  gint caps_size;
  GstStructure *structure;

  templcaps = gst_pad_get_pad_template_caps (decoder->srcpad);
  caps = gst_pad_peer_query_caps (decoder->srcpad, templcaps);
  if (caps)
    gst_caps_unref (templcaps);
  else
    caps = templcaps;
  templcaps = NULL;

  if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
    goto caps_error;

  GST_LOG_OBJECT (decoder, "peer caps %" GST_PTR_FORMAT, caps);

  /* before fixating, try to use whatever upstream provided */
  caps = gst_caps_make_writable (caps);
  caps_size = gst_caps_get_size (caps);
  if (decoder->priv->input_state && decoder->priv->input_state->caps) {
    GstCaps *sinkcaps = decoder->priv->input_state->caps;
    GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
    gint width, height;

    if (gst_structure_get_int (structure, "width", &width)) {
      for (i = 0; i < caps_size; i++) {
        gst_structure_set (gst_caps_get_structure (caps, i), "width",
            G_TYPE_INT, width, NULL);
      }
    }

    if (gst_structure_get_int (structure, "height", &height)) {
      for (i = 0; i < caps_size; i++) {
        gst_structure_set (gst_caps_get_structure (caps, i), "height",
            G_TYPE_INT, height, NULL);
      }
    }
  }

  for (i = 0; i < caps_size; i++) {
    structure = gst_caps_get_structure (caps, i);
    /* Random I420 1280x720 for fixation */
    if (gst_structure_has_field (structure, "format"))
      gst_structure_fixate_field_string (structure, "format", "I420");
    else
      gst_structure_set (structure, "format", G_TYPE_STRING, "I420", NULL);

    if (gst_structure_has_field (structure, "width"))
      gst_structure_fixate_field_nearest_int (structure, "width", 1280);
    else
      gst_structure_set (structure, "width", G_TYPE_INT, 1280, NULL);

    if (gst_structure_has_field (structure, "height"))
      gst_structure_fixate_field_nearest_int (structure, "height", 720);
    else
      gst_structure_set (structure, "height", G_TYPE_INT, 720, NULL);
  }
  caps = gst_caps_fixate (caps);

  if (!caps || !gst_video_info_from_caps (&info, caps))
    goto caps_error;

  GST_INFO_OBJECT (decoder,
      "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
  state =
      gst_aml_video_decoder_set_output_state (decoder, info.finfo->format,
      info.width, info.height, decoder->priv->input_state);
  gst_aml_video_codec_state_unref (state);
  gst_caps_unref (caps);

  return TRUE;

caps_error:
  {
    if (caps)
      gst_caps_unref (caps);
    return FALSE;
  }
}

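/* Illustration (hypothetical values): if the peer reports
 * "video/x-raw, format=(string){ I420, NV12 }, width=(int)[ 16, 4096 ]"
 * and the input caps carry no dimensions, the code above fixates this to
 * "video/x-raw, format=I420, width=1280, height=720" so a GAP event can be
 * forwarded before any frame was decoded. */
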
static gboolean
gst_aml_video_decoder_handle_missing_data_default (GstAmlVideoDecoder *
    decoder, GstClockTime timestamp, GstClockTime duration)
{
  GstAmlVideoDecoderPrivate *priv;

  priv = decoder->priv;

  if (priv->automatic_request_sync_points) {
    GstClockTime deadline =
        gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
        timestamp);

    GST_DEBUG_OBJECT (decoder,
        "Requesting sync point for missing data at running time %"
        GST_TIME_FORMAT " timestamp %" GST_TIME_FORMAT " with duration %"
        GST_TIME_FORMAT, GST_TIME_ARGS (deadline), GST_TIME_ARGS (timestamp),
        GST_TIME_ARGS (duration));

    gst_aml_video_decoder_request_sync_point_internal (decoder, deadline,
        priv->automatic_request_sync_point_flags);
  }

  return TRUE;
}

1377static gboolean
1378gst_aml_video_decoder_sink_event_default (GstAmlVideoDecoder * decoder,
1379 GstEvent * event)
1380{
1381 GstAmlVideoDecoderClass *decoder_class;
1382 GstAmlVideoDecoderPrivate *priv;
1383 gboolean ret = FALSE;
1384 gboolean forward_immediate = FALSE;
1385
1386 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
1387
1388 priv = decoder->priv;
1389
1390 switch (GST_EVENT_TYPE (event)) {
1391 case GST_EVENT_STREAM_START:
1392 {
1393 GstFlowReturn flow_ret = GST_FLOW_OK;
1394
1395 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1396 flow_ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
1397 ret = (flow_ret == GST_FLOW_OK);
1398
1399 GST_DEBUG_OBJECT (decoder, "received STREAM_START. Clearing taglist");
1400 /* Flush upstream tags after a STREAM_START */
1401 if (priv->upstream_tags) {
1402 gst_tag_list_unref (priv->upstream_tags);
1403 priv->upstream_tags = NULL;
1404 priv->tags_changed = TRUE;
1405 }
1406 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1407
1408 /* Forward STREAM_START immediately. Everything is drained after
1409 * the STREAM_START event and we can forward this event immediately
1410 * now without having buffers out of order.
1411 */
1412 forward_immediate = TRUE;
1413 break;
1414 }
1415 case GST_EVENT_CAPS:
1416 {
1417 GstCaps *caps;
1418
1419 gst_event_parse_caps (event, &caps);
1420 ret = gst_aml_video_decoder_setcaps (decoder, caps);
1421 gst_event_unref (event);
1422 event = NULL;
1423 break;
1424 }
1425 case GST_EVENT_SEGMENT_DONE:
1426 {
1427 GstFlowReturn flow_ret = GST_FLOW_OK;
1428
1429 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1430 flow_ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
1431 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1432 ret = (flow_ret == GST_FLOW_OK);
1433
1434 /* Forward SEGMENT_DONE immediately. This is required
1435 * because no buffer or serialized event might come
1436 * after SEGMENT_DONE and nothing could trigger another
1437 * _finish_frame() call.
1438 *
1439 * The subclass can override this behaviour by overriding
1440 * the ::sink_event() vfunc and not chaining up to the
1441 * parent class' ::sink_event() until a later time.
1442 */
1443 forward_immediate = TRUE;
1444 break;
1445 }
1446 case GST_EVENT_EOS:
1447 {
1448 GstFlowReturn flow_ret = GST_FLOW_OK;
1449
1450 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1451 flow_ret = gst_aml_video_decoder_drain_out (decoder, TRUE);
1452 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1453 ret = (flow_ret == GST_FLOW_OK);
1454
1455 /* Error out even if EOS was ok when we had input, but no output */
1456 if (ret && priv->had_input_data && !priv->had_output_data) {
fei.dengaaaa5252024-12-19 14:08:27 +08001457 GST_WARNING_OBJECT (decoder, "No valid frames decoded before end of stream");
le.han02c38f02024-08-16 02:35:36 +00001458 }
1459
1460 /* Forward EOS immediately. This is required because no
1461 * buffer or serialized event will come after EOS and
1462 * nothing could trigger another _finish_frame() call.
1463 *
1464 * The subclass can override this behaviour by overriding
1465 * the ::sink_event() vfunc and not chaining up to the
1466 * parent class' ::sink_event() until a later time.
1467 */
1468 forward_immediate = TRUE;
1469 break;
1470 }
1471 case GST_EVENT_GAP:
1472 {
le.han44bdbd82024-08-20 07:36:50 +00001473#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
le.han02c38f02024-08-16 02:35:36 +00001474 GstClockTime timestamp, duration;
1475 GstGapFlags gap_flags = 0;
le.han44bdbd82024-08-20 07:36:50 +00001476#endif
le.han02c38f02024-08-16 02:35:36 +00001477 GstFlowReturn flow_ret = GST_FLOW_OK;
1478 gboolean needs_reconfigure = FALSE;
1479 GList *events;
1480 GList *frame_events;
1481
le.han44bdbd82024-08-20 07:36:50 +00001482#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
le.han02c38f02024-08-16 02:35:36 +00001483 gst_event_parse_gap (event, &timestamp, &duration);
1484 gst_event_parse_gap_flags (event, &gap_flags);
le.han44bdbd82024-08-20 07:36:50 +00001485#endif
le.han02c38f02024-08-16 02:35:36 +00001486
1487 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1488 /* If this is not missing data, or the subclass does not handle it
1489 * specifically, then drain out the decoder and forward the event
1490 * directly. */
le.han44bdbd82024-08-20 07:36:50 +00001491#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
le.han02c38f02024-08-16 02:35:36 +00001492 if ((gap_flags & GST_GAP_FLAG_MISSING_DATA) == 0
1493 || !decoder_class->handle_missing_data
1494 || decoder_class->handle_missing_data (decoder, timestamp,
1495 duration)) {
le.han44bdbd82024-08-20 07:36:50 +00001496#endif
le.han02c38f02024-08-16 02:35:36 +00001497 if (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)
1498 flow_ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
1499 ret = (flow_ret == GST_FLOW_OK);
1500
1501 /* Ensure we have caps before forwarding the event */
1502 if (!decoder->priv->output_state) {
1503 if (!gst_aml_video_decoder_negotiate_default_caps (decoder)) {
1504 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1505 GST_ELEMENT_ERROR (decoder, STREAM, FORMAT, (NULL),
1506 ("Decoder output not negotiated before GAP event."));
1507 forward_immediate = TRUE;
1508 break;
1509 }
1510 needs_reconfigure = TRUE;
1511 }
1512
1513 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad)
1514 || needs_reconfigure;
1515 if (decoder->priv->output_state_changed || needs_reconfigure) {
1516 if (!gst_aml_video_decoder_negotiate_unlocked (decoder)) {
1517 GST_WARNING_OBJECT (decoder, "Failed to negotiate with downstream");
1518 gst_pad_mark_reconfigure (decoder->srcpad);
1519 }
1520 }
1521
1522 GST_DEBUG_OBJECT (decoder, "Pushing all pending serialized events"
1523 " before the gap");
1524 events = decoder->priv->pending_events;
1525 frame_events = decoder->priv->current_frame_events;
1526 decoder->priv->pending_events = NULL;
1527 decoder->priv->current_frame_events = NULL;
1528
1529 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1530
1531 gst_aml_video_decoder_push_event_list (decoder, events);
1532 gst_aml_video_decoder_push_event_list (decoder, frame_events);
1533
1534 /* Forward GAP immediately. Everything is drained after
1535 * the GAP event and we can forward this event immediately
1536 * now without having buffers out of order.
1537 */
1538 forward_immediate = TRUE;
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
      } else {
        GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
        gst_clear_event (&event);
      }
#endif
      break;
1546 }
1547 case GST_EVENT_CUSTOM_DOWNSTREAM:
1548 {
1549 gboolean in_still;
1550 GstFlowReturn flow_ret = GST_FLOW_OK;
1551
1552 if (gst_video_event_parse_still_frame (event, &in_still)) {
1553 if (in_still) {
1554 GST_DEBUG_OBJECT (decoder, "draining current data for still-frame");
1555 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1556 flow_ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
1557 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1558 ret = (flow_ret == GST_FLOW_OK);
1559 }
1560 /* Forward STILL_FRAME immediately. Everything is drained after
1561 * the STILL_FRAME event and we can forward this event immediately
1562 * now without having buffers out of order.
1563 */
1564 forward_immediate = TRUE;
1565 }
1566 break;
1567 }
1568 case GST_EVENT_SEGMENT:
1569 {
1570 GstSegment segment;
1571
1572 gst_event_copy_segment (event, &segment);
1573
1574 if (segment.format == GST_FORMAT_TIME) {
1575 GST_DEBUG_OBJECT (decoder,
1576 "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1577 } else {
1578 gint64 start;
1579
1580 GST_DEBUG_OBJECT (decoder,
1581 "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1582
1583 /* handle newsegment as a result from our legacy simple seeking */
1584 /* note that initial 0 should convert to 0 in any case */
1585 if (priv->do_estimate_rate &&
1586 gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
1587 segment.start, GST_FORMAT_TIME, &start)) {
1588 /* best attempt convert */
1589 /* as these are only estimates, stop is kept open-ended to avoid
1590 * premature cutting */
1591 GST_DEBUG_OBJECT (decoder,
1592 "converted to TIME start %" GST_TIME_FORMAT,
1593 GST_TIME_ARGS (start));
1594 segment.start = start;
1595 segment.stop = GST_CLOCK_TIME_NONE;
1596 segment.time = start;
1597 /* replace event */
1598 gst_event_unref (event);
1599 event = gst_event_new_segment (&segment);
1600 } else {
1601 goto newseg_wrong_format;
1602 }
1603 }
1604
1605 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1606
1607 /* Update the decode flags in the segment if we have an instant-rate
1608 * override active */
1609 GST_OBJECT_LOCK (decoder);
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
      if (!priv->decode_flags_override)
1612 priv->decode_flags = segment.flags;
1613 else {
1614 segment.flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1615 segment.flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1616 }
#endif

1619 priv->base_timestamp = GST_CLOCK_TIME_NONE;
1620 priv->base_picture_number = 0;
1621
1622 decoder->input_segment = segment;
1623 decoder->priv->in_out_segment_sync = FALSE;
1624
1625 GST_OBJECT_UNLOCK (decoder);
1626 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1627
1628 break;
1629 }
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
    case GST_EVENT_INSTANT_RATE_CHANGE:
1632 {
1633 GstSegmentFlags flags;
1634 GstSegment *seg;
1635
1636 gst_event_parse_instant_rate_change (event, NULL, &flags);
1637
1638 GST_OBJECT_LOCK (decoder);
1639 priv->decode_flags_override = TRUE;
1640 priv->decode_flags = flags;
1641
1642 /* Update the input segment flags */
1643 seg = &decoder->input_segment;
1644 seg->flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1645 seg->flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1646 GST_OBJECT_UNLOCK (decoder);
1647 break;
1648 }
#endif
    case GST_EVENT_FLUSH_STOP:
1651 {
1652 GList *l;
1653
1654 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1655 for (l = priv->frames.head; l; l = l->next) {
1656 GstAmlVideoCodecFrame *frame = l->data;
1657
1658 frame->events = _flush_events (decoder->srcpad, frame->events);
1659 }
1660 priv->current_frame_events = _flush_events (decoder->srcpad,
1661 decoder->priv->current_frame_events);
1662
1663 /* well, this is kind of worse than a DISCONT */
1664 gst_aml_video_decoder_flush (decoder, TRUE);
1665 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1666 /* Forward FLUSH_STOP immediately. This is required because it is
1667 * expected to be forwarded immediately and no buffers are queued
1668 * anyway.
1669 */
1670 forward_immediate = TRUE;
1671 break;
1672 }
1673 case GST_EVENT_TAG:
1674 {
1675 GstTagList *tags;
1676
1677 gst_event_parse_tag (event, &tags);
1678
1679 if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
1680 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1681 if (priv->upstream_tags != tags) {
1682 if (priv->upstream_tags)
1683 gst_tag_list_unref (priv->upstream_tags);
1684 priv->upstream_tags = gst_tag_list_ref (tags);
1685 GST_INFO_OBJECT (decoder, "upstream tags: %" GST_PTR_FORMAT, tags);
1686 }
1687 gst_event_unref (event);
1688 event = gst_aml_video_decoder_create_merged_tags_event (decoder);
1689 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1690 if (!event)
1691 ret = TRUE;
1692 }
1693 break;
1694 }
1695 default:
1696 break;
1697 }
1698
1699 /* Forward non-serialized events immediately, and all other
1700 * events which can be forwarded immediately without potentially
1701 * causing the event to go out of order with other events and
1702 * buffers as decided above.
1703 */
1704 if (event) {
1705 if (!GST_EVENT_IS_SERIALIZED (event) || forward_immediate) {
1706 ret = gst_aml_video_decoder_push_event (decoder, event);
1707 } else {
1708 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1709 decoder->priv->current_frame_events =
1710 g_list_prepend (decoder->priv->current_frame_events, event);
1711 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1712 ret = TRUE;
1713 }
1714 }
1715
1716 return ret;
1717
1718newseg_wrong_format:
1719 {
1720 GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
1721 gst_event_unref (event);
1722 /* SWALLOW EVENT */
1723 return TRUE;
1724 }
1725}
1726
1727static gboolean
1728gst_aml_video_decoder_sink_event (GstPad * pad, GstObject * parent,
1729 GstEvent * event)
1730{
1731 GstAmlVideoDecoder *decoder;
1732 GstAmlVideoDecoderClass *decoder_class;
1733 gboolean ret = FALSE;
1734
1735 decoder = GST_AML_VIDEO_DECODER (parent);
1736 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
1737
1738 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1739 GST_EVENT_TYPE_NAME (event));
1740
1741 if (decoder_class->sink_event)
1742 ret = decoder_class->sink_event (decoder, event);
1743
1744 return ret;
1745}
1746
1747/* perform upstream byte <-> time conversion (duration, seeking)
1748 * if subclass allows and if enough data for moderately decent conversion */
1749static inline gboolean
1750gst_aml_video_decoder_do_byte (GstAmlVideoDecoder * dec)
1751{
1752 gboolean ret;
1753
1754 GST_OBJECT_LOCK (dec);
1755 ret = dec->priv->do_estimate_rate && (dec->priv->bytes_out > 0)
1756 && (dec->priv->time > GST_SECOND);
1757 GST_OBJECT_UNLOCK (dec);
1758
1759 return ret;
1760}
1761
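/* Fallback seek handling: converts a plain, open-ended, flushing TIME seek
 * into an equivalent BYTES seek pushed upstream, using the estimated bitrate
 * for the time -> byte conversion. Anything fancier (rate != 1.0, a non-SET
 * start, a defined stop position, or a non-flushing seek) is rejected here. */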
1762static gboolean
1763gst_aml_video_decoder_do_seek (GstAmlVideoDecoder * dec, GstEvent * event)
1764{
1765 GstFormat format;
1766 GstSeekFlags flags;
1767 GstSeekType start_type, end_type;
1768 gdouble rate;
1769 gint64 start, start_time, end_time;
1770 GstSegment seek_segment;
1771 guint32 seqnum;
1772
1773 gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
1774 &start_time, &end_type, &end_time);
1775
1776 /* we'll handle plain open-ended flushing seeks with the simple approach */
1777 if (rate != 1.0) {
1778 GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
1779 return FALSE;
1780 }
1781
1782 if (start_type != GST_SEEK_TYPE_SET) {
1783 GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
1784 return FALSE;
1785 }
1786
1787 if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
1788 (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
1789 GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
1790 return FALSE;
1791 }
1792
1793 if (!(flags & GST_SEEK_FLAG_FLUSH)) {
1794 GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
1795 return FALSE;
1796 }
1797
1798 memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
1799 gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
1800 start_time, end_type, end_time, NULL);
1801 start_time = seek_segment.position;
1802
1803 if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
1804 GST_FORMAT_BYTES, &start)) {
1805 GST_DEBUG_OBJECT (dec, "conversion failed");
1806 return FALSE;
1807 }
1808
1809 seqnum = gst_event_get_seqnum (event);
1810 event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
1811 GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
1812 gst_event_set_seqnum (event, seqnum);
1813
1814 GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
1815 G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
1816
1817 return gst_pad_push_event (dec->sinkpad, event);
1818}
1819
1820static gboolean
1821gst_aml_video_decoder_src_event_default (GstAmlVideoDecoder * decoder,
1822 GstEvent * event)
1823{
1824 GstAmlVideoDecoderPrivate *priv;
1825 gboolean res = FALSE;
1826
1827 priv = decoder->priv;
1828
1829 GST_DEBUG_OBJECT (decoder,
1830 "received event %d, %s", GST_EVENT_TYPE (event),
1831 GST_EVENT_TYPE_NAME (event));
1832
1833 switch (GST_EVENT_TYPE (event)) {
1834 case GST_EVENT_SEEK:
1835 {
1836 GstFormat format;
1837 gdouble rate;
1838 GstSeekFlags flags;
1839 GstSeekType start_type, stop_type;
1840 gint64 start, stop;
1841 gint64 tstart, tstop;
1842 guint32 seqnum;
1843
1844 gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
1845 &stop_type, &stop);
1846 seqnum = gst_event_get_seqnum (event);
1847
1848 /* upstream gets a chance first */
1849 if ((res = gst_pad_push_event (decoder->sinkpad, event)))
1850 break;
1851
1852 /* if upstream fails for a time seek, maybe we can help if allowed */
1853 if (format == GST_FORMAT_TIME) {
1854 if (gst_aml_video_decoder_do_byte (decoder))
1855 res = gst_aml_video_decoder_do_seek (decoder, event);
1856 break;
1857 }
1858
1859 /* ... though a non-time seek can be aided as well */
1860 /* First bring the requested format to time */
1861 if (!(res =
1862 gst_pad_query_convert (decoder->srcpad, format, start,
1863 GST_FORMAT_TIME, &tstart)))
1864 goto convert_error;
1865 if (!(res =
1866 gst_pad_query_convert (decoder->srcpad, format, stop,
1867 GST_FORMAT_TIME, &tstop)))
1868 goto convert_error;
1869
1870 /* then seek with time on the peer */
1871 event = gst_event_new_seek (rate, GST_FORMAT_TIME,
1872 flags, start_type, tstart, stop_type, tstop);
1873 gst_event_set_seqnum (event, seqnum);
1874
1875 res = gst_pad_push_event (decoder->sinkpad, event);
1876 break;
1877 }
1878 case GST_EVENT_QOS:
1879 {
1880 GstQOSType type;
1881 gdouble proportion;
1882 GstClockTimeDiff diff;
1883 GstClockTime timestamp;
1884
1885 gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);
1886
1887 GST_OBJECT_LOCK (decoder);
1888 priv->proportion = proportion;
1889 if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
1890 if (G_UNLIKELY (diff > 0)) {
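          /* we are running late: aim well past the late frame (twice the
           * lateness plus one frame duration) so we get a chance to catch up */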
1891 priv->earliest_time = timestamp + 2 * diff + priv->qos_frame_duration;
1892 } else {
1893 priv->earliest_time = timestamp + diff;
1894 }
1895 } else {
1896 priv->earliest_time = GST_CLOCK_TIME_NONE;
1897 }
1898 GST_OBJECT_UNLOCK (decoder);
1899
1900 GST_DEBUG_OBJECT (decoder,
1901 "got QoS %" GST_TIME_FORMAT ", %" GST_STIME_FORMAT ", %g",
1902 GST_TIME_ARGS (timestamp), GST_STIME_ARGS (diff), proportion);
1903
1904 res = gst_pad_push_event (decoder->sinkpad, event);
1905 break;
1906 }
1907 default:
1908 res = gst_pad_push_event (decoder->sinkpad, event);
1909 break;
1910 }
1911done:
1912 return res;
1913
1914convert_error:
1915 GST_DEBUG_OBJECT (decoder, "could not convert format");
1916 goto done;
1917}
1918
1919static gboolean
1920gst_aml_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1921{
1922 GstAmlVideoDecoder *decoder;
1923 GstAmlVideoDecoderClass *decoder_class;
1924 gboolean ret = FALSE;
1925
1926 decoder = GST_AML_VIDEO_DECODER (parent);
1927 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
1928
1929 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1930 GST_EVENT_TYPE_NAME (event));
1931
1932 if (decoder_class->src_event)
1933 ret = decoder_class->src_event (decoder, event);
1934
1935 return ret;
1936}
1937
1938static gboolean
1939gst_aml_video_decoder_src_query_default (GstAmlVideoDecoder * dec, GstQuery * query)
1940{
1941 GstPad *pad = GST_AML_VIDEO_DECODER_SRC_PAD (dec);
1942 gboolean res = TRUE;
1943
1944 GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
1945
1946 switch (GST_QUERY_TYPE (query)) {
1947 case GST_QUERY_POSITION:
1948 {
1949 GstFormat format;
1950 gint64 time, value;
1951
1952 /* upstream gets a chance first */
1953 if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
1954 GST_LOG_OBJECT (dec, "returning peer response");
1955 break;
1956 }
1957
1958 /* Refuse BYTES format queries. If it made sense to
1959 * answer them, upstream would have already */
1960 gst_query_parse_position (query, &format, NULL);
1961
1962 if (format == GST_FORMAT_BYTES) {
1963 GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
1964 break;
1965 }
1966
1967 /* we start from the last seen time */
1968 time = dec->priv->last_timestamp_out;
1969 /* correct for the segment values */
1970 time = gst_segment_to_stream_time (&dec->output_segment,
1971 GST_FORMAT_TIME, time);
1972
1973 GST_LOG_OBJECT (dec,
1974 "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
1975
1976 /* and convert to the final format */
1977 if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
1978 format, &value)))
1979 break;
1980
1981 gst_query_set_position (query, format, value);
1982
1983 GST_LOG_OBJECT (dec,
1984 "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
1985 format);
1986 break;
1987 }
1988 case GST_QUERY_DURATION:
1989 {
1990 GstFormat format;
1991
1992 /* upstream in any case */
1993 if ((res = gst_pad_query_default (pad, GST_OBJECT (dec), query)))
1994 break;
1995
1996 gst_query_parse_duration (query, &format, NULL);
1997 /* try answering TIME by converting from BYTE if subclass allows */
1998 if (format == GST_FORMAT_TIME && gst_aml_video_decoder_do_byte (dec)) {
1999 gint64 value;
2000
2001 if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
2002 &value)) {
2003 GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
2004 if (gst_pad_query_convert (dec->sinkpad,
2005 GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
2006 gst_query_set_duration (query, GST_FORMAT_TIME, value);
2007 res = TRUE;
2008 }
2009 }
2010 }
2011 break;
2012 }
2013 case GST_QUERY_CONVERT:
2014 {
2015 GstFormat src_fmt, dest_fmt;
2016 gint64 src_val, dest_val;
2017
2018 GST_DEBUG_OBJECT (dec, "convert query");
2019
2020 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2021 GST_OBJECT_LOCK (dec);
2022 if (dec->priv->output_state != NULL)
2023 res = __gst_aml_video_rawvideo_convert (dec->priv->output_state,
2024 src_fmt, src_val, &dest_fmt, &dest_val);
2025 else
2026 res = FALSE;
2027 GST_OBJECT_UNLOCK (dec);
2028 if (!res)
2029 goto error;
2030 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2031 break;
2032 }
2033 case GST_QUERY_LATENCY:
2034 {
2035 gboolean live;
2036 GstClockTime min_latency, max_latency;
2037
2038 res = gst_pad_peer_query (dec->sinkpad, query);
2039 if (res) {
2040 gst_query_parse_latency (query, &live, &min_latency, &max_latency);
2041 GST_DEBUG_OBJECT (dec, "Peer qlatency: live %d, min %"
2042 GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
2043 GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
2044
2045 GST_OBJECT_LOCK (dec);
2046 min_latency += dec->priv->min_latency;
2047 if (max_latency == GST_CLOCK_TIME_NONE
2048 || dec->priv->max_latency == GST_CLOCK_TIME_NONE)
2049 max_latency = GST_CLOCK_TIME_NONE;
2050 else
2051 max_latency += dec->priv->max_latency;
2052 GST_OBJECT_UNLOCK (dec);
2053
2054 gst_query_set_latency (query, live, min_latency, max_latency);
2055 }
2056 }
2057 break;
2058 default:
2059 res = gst_pad_query_default (pad, GST_OBJECT (dec), query);
2060 }
2061 return res;
2062
2063error:
2064 GST_ERROR_OBJECT (dec, "query failed");
2065 return res;
2066}
2067
2068static gboolean
2069gst_aml_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2070{
2071 GstAmlVideoDecoder *decoder;
2072 GstAmlVideoDecoderClass *decoder_class;
2073 gboolean ret = FALSE;
2074
2075 decoder = GST_AML_VIDEO_DECODER (parent);
2076 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
2077
2078 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2079 GST_QUERY_TYPE_NAME (query));
2080
2081 if (decoder_class->src_query)
2082 ret = decoder_class->src_query (decoder, query);
2083
2084 return ret;
2085}
2086
2087/**
2088 * gst_aml_video_decoder_proxy_getcaps:
2089 * @decoder: a #GstAmlVideoDecoder
2090 * @caps: (allow-none): initial caps
2091 * @filter: (allow-none): filter caps
2092 *
2093 * Returns caps that express @caps (or sink template caps if @caps == NULL)
2094 * restricted to resolution/format/... combinations supported by downstream
2095 * elements.
2096 *
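 * A minimal sketch of a subclass @getcaps implementation built on top of
 * this helper (the "video/x-h264" caps are just an example):
 * |[<!-- language="C" -->
 * static GstCaps *
 * my_dec_getcaps (GstAmlVideoDecoder * dec, GstCaps * filter)
 * {
 *   GstCaps *codec_caps = gst_caps_new_empty_simple ("video/x-h264");
 *   GstCaps *res =
 *       gst_aml_video_decoder_proxy_getcaps (dec, codec_caps, filter);
 *   gst_caps_unref (codec_caps);
 *   return res;
 * }
 * ]|
 *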
2097 * Returns: (transfer full): a #GstCaps owned by caller
2098 *
2099 * Since: 1.6
2100 */
2101GstCaps *
2102gst_aml_video_decoder_proxy_getcaps (GstAmlVideoDecoder * decoder, GstCaps * caps,
2103 GstCaps * filter)
2104{
2105 return __gst_aml_video_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
2106 GST_AML_VIDEO_DECODER_SINK_PAD (decoder),
2107 GST_AML_VIDEO_DECODER_SRC_PAD (decoder), caps, filter);
2108}
2109
2110static GstCaps *
2111gst_aml_video_decoder_sink_getcaps (GstAmlVideoDecoder * decoder, GstCaps * filter)
2112{
2113 GstAmlVideoDecoderClass *klass;
2114 GstCaps *caps;
2115
2116 klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
2117
2118 if (klass->getcaps)
2119 caps = klass->getcaps (decoder, filter);
2120 else
2121 caps = gst_aml_video_decoder_proxy_getcaps (decoder, NULL, filter);
2122
2123 GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
2124
2125 return caps;
2126}
2127
2128static gboolean
2129gst_aml_video_decoder_sink_query_default (GstAmlVideoDecoder * decoder,
2130 GstQuery * query)
2131{
2132 GstPad *pad = GST_AML_VIDEO_DECODER_SINK_PAD (decoder);
2133 GstAmlVideoDecoderPrivate *priv;
2134 gboolean res = FALSE;
2135
2136 priv = decoder->priv;
2137
2138 GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);
2139
2140 switch (GST_QUERY_TYPE (query)) {
2141 case GST_QUERY_CONVERT:
2142 {
2143 GstFormat src_fmt, dest_fmt;
2144 gint64 src_val, dest_val;
2145
2146 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2147 GST_OBJECT_LOCK (decoder);
2148 res =
2149 __gst_aml_video_encoded_video_convert (priv->bytes_out, priv->time,
2150 src_fmt, src_val, &dest_fmt, &dest_val);
2151 GST_OBJECT_UNLOCK (decoder);
2152 if (!res)
2153 goto error;
2154 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2155 break;
2156 }
2157 case GST_QUERY_ALLOCATION:{
2158 GstAmlVideoDecoderClass *klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
2159
2160 if (klass->propose_allocation)
2161 res = klass->propose_allocation (decoder, query);
2162 break;
2163 }
2164 case GST_QUERY_CAPS:{
2165 GstCaps *filter, *caps;
2166
2167 gst_query_parse_caps (query, &filter);
2168 caps = gst_aml_video_decoder_sink_getcaps (decoder, filter);
2169 gst_query_set_caps_result (query, caps);
2170 gst_caps_unref (caps);
2171 res = TRUE;
2172 break;
2173 }
2174 case GST_QUERY_ACCEPT_CAPS:{
2175 if (decoder->priv->use_default_pad_acceptcaps) {
2176 res =
2177 gst_pad_query_default (GST_AML_VIDEO_DECODER_SINK_PAD (decoder),
2178 GST_OBJECT_CAST (decoder), query);
2179 } else {
2180 GstCaps *caps;
2181 GstCaps *allowed_caps;
2182 GstCaps *template_caps;
2183 gboolean accept;
2184
2185 gst_query_parse_accept_caps (query, &caps);
2186
2187 template_caps = gst_pad_get_pad_template_caps (pad);
2188 accept = gst_caps_is_subset (caps, template_caps);
2189 gst_caps_unref (template_caps);
2190
2191 if (accept) {
2192 allowed_caps =
2193 gst_pad_query_caps (GST_AML_VIDEO_DECODER_SINK_PAD (decoder), caps);
2194
2195 accept = gst_caps_can_intersect (caps, allowed_caps);
2196
2197 gst_caps_unref (allowed_caps);
2198 }
2199
2200 gst_query_set_accept_caps_result (query, accept);
2201 res = TRUE;
2202 }
2203 break;
2204 }
2205 default:
2206 res = gst_pad_query_default (pad, GST_OBJECT (decoder), query);
2207 break;
2208 }
2209done:
2210
2211 return res;
2212error:
2213 GST_DEBUG_OBJECT (decoder, "query failed");
2214 goto done;
2215
2216}
2217
2218static gboolean
2219gst_aml_video_decoder_sink_query (GstPad * pad, GstObject * parent,
2220 GstQuery * query)
2221{
2222 GstAmlVideoDecoder *decoder;
2223 GstAmlVideoDecoderClass *decoder_class;
2224 gboolean ret = FALSE;
2225
2226 decoder = GST_AML_VIDEO_DECODER (parent);
2227 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
2228
2229 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2230 GST_QUERY_TYPE_NAME (query));
2231
2232 if (decoder_class->sink_query)
2233 ret = decoder_class->sink_query (decoder, query);
2234
2235 return ret;
2236}
2237
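/* When the input is not packetized, buffer metadata (PTS/DTS/duration/flags)
 * is tracked by byte offset into the input adapter: each incoming buffer's
 * info is queued below together with the offset at which it was pushed, and
 * parsed frames later retrieve the info belonging to the offset their bytes
 * came from (see gst_aml_video_decoder_get_buffer_info_at_offset()). */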
2238typedef struct _Timestamp Timestamp;
2239struct _Timestamp
2240{
2241 guint64 offset;
2242 GstClockTime pts;
2243 GstClockTime dts;
2244 GstClockTime duration;
2245 guint flags;
2246};
2247
2248static void
2249timestamp_free (Timestamp * ts)
2250{
2251 g_slice_free (Timestamp, ts);
2252}
2253
2254static void
2255gst_aml_video_decoder_add_buffer_info (GstAmlVideoDecoder * decoder,
2256 GstBuffer * buffer)
2257{
2258 GstAmlVideoDecoderPrivate *priv = decoder->priv;
2259 Timestamp *ts;
2260
2261 if (!GST_BUFFER_PTS_IS_VALID (buffer) &&
2262 !GST_BUFFER_DTS_IS_VALID (buffer) &&
2263 !GST_BUFFER_DURATION_IS_VALID (buffer) &&
2264 GST_BUFFER_FLAGS (buffer) == 0) {
2265 /* Save memory - don't bother storing info
2266 * for buffers with no distinguishing info */
2267 return;
2268 }
2269
2270 ts = g_slice_new (Timestamp);
2271
2272 GST_LOG_OBJECT (decoder,
2273 "adding PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT
2274 " (offset:%" G_GUINT64_FORMAT ")",
2275 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2276 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), priv->input_offset);
2277
2278 ts->offset = priv->input_offset;
2279 ts->pts = GST_BUFFER_PTS (buffer);
2280 ts->dts = GST_BUFFER_DTS (buffer);
2281 ts->duration = GST_BUFFER_DURATION (buffer);
2282 ts->flags = GST_BUFFER_FLAGS (buffer);
2283
2284 g_queue_push_tail (&priv->timestamps, ts);
2285
2286 if (g_queue_get_length (&priv->timestamps) > 40) {
    GST_WARNING_OBJECT (decoder,
        "decoder timestamp list getting long: %d timestamps, "
        "possible internal leaking?", g_queue_get_length (&priv->timestamps));
2290 }
2291}
2292
2293static void
2294gst_aml_video_decoder_get_buffer_info_at_offset (GstAmlVideoDecoder *
2295 decoder, guint64 offset, GstClockTime * pts, GstClockTime * dts,
2296 GstClockTime * duration, guint * flags)
2297{
2298#ifndef GST_DISABLE_GST_DEBUG
2299 guint64 got_offset = 0;
2300#endif
2301 Timestamp *ts;
2302 GList *g;
2303
2304 *pts = GST_CLOCK_TIME_NONE;
2305 *dts = GST_CLOCK_TIME_NONE;
2306 *duration = GST_CLOCK_TIME_NONE;
2307 *flags = 0;
2308
2309 g = decoder->priv->timestamps.head;
2310 while (g) {
2311 ts = g->data;
2312 if (ts->offset <= offset) {
2313 GList *next = g->next;
2314#ifndef GST_DISABLE_GST_DEBUG
2315 got_offset = ts->offset;
2316#endif
2317 *pts = ts->pts;
2318 *dts = ts->dts;
2319 *duration = ts->duration;
2320 *flags = ts->flags;
2321 g_queue_delete_link (&decoder->priv->timestamps, g);
2322 g = next;
2323 timestamp_free (ts);
2324 } else {
2325 break;
2326 }
2327 }
2328
2329 GST_LOG_OBJECT (decoder,
2330 "got PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT " flags %x @ offs %"
2331 G_GUINT64_FORMAT " (wanted offset:%" G_GUINT64_FORMAT ")",
2332 GST_TIME_ARGS (*pts), GST_TIME_ARGS (*dts), *flags, got_offset, offset);
2333}
2334
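/* g_queue_clear_full() was only added in GLib 2.60; provide a local
 * fallback with the same behaviour for older GLib versions. */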
2335#if !GLIB_CHECK_VERSION(2, 60, 0)
2336#define g_queue_clear_full queue_clear_full
2337static void
2338queue_clear_full (GQueue * queue, GDestroyNotify free_func)
2339{
2340 gpointer data;
2341
2342 while ((data = g_queue_pop_head (queue)) != NULL)
2343 free_func (data);
2344}
2345#endif
2346
2347static void
2348gst_aml_video_decoder_clear_queues (GstAmlVideoDecoder * dec)
2349{
2350 GstAmlVideoDecoderPrivate *priv = dec->priv;
2351
2352 g_list_free_full (priv->output_queued,
2353 (GDestroyNotify) gst_mini_object_unref);
2354 priv->output_queued = NULL;
2355
2356 g_list_free_full (priv->gather, (GDestroyNotify) gst_mini_object_unref);
2357 priv->gather = NULL;
2358 g_list_free_full (priv->decode, (GDestroyNotify) gst_aml_video_codec_frame_unref);
2359 priv->decode = NULL;
2360 g_list_free_full (priv->parse, (GDestroyNotify) gst_mini_object_unref);
2361 priv->parse = NULL;
2362 g_list_free_full (priv->parse_gather,
2363 (GDestroyNotify) gst_aml_video_codec_frame_unref);
2364 priv->parse_gather = NULL;
2365 g_queue_clear_full (&priv->frames,
2366 (GDestroyNotify) gst_aml_video_codec_frame_unref);
2367}
2368
2369static void
2370gst_aml_video_decoder_reset (GstAmlVideoDecoder * decoder, gboolean full,
2371 gboolean flush_hard)
2372{
2373 GstAmlVideoDecoderPrivate *priv = decoder->priv;
2374
2375 GST_DEBUG_OBJECT (decoder, "reset full %d", full);
2376
2377 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
2378
2379 if (full || flush_hard) {
2380 gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
2381 gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
2382 gst_aml_video_decoder_clear_queues (decoder);
2383 decoder->priv->in_out_segment_sync = TRUE;
2384
2385 if (priv->current_frame) {
2386 gst_aml_video_codec_frame_unref (priv->current_frame);
2387 priv->current_frame = NULL;
2388 }
2389
2390 g_list_free_full (priv->current_frame_events,
2391 (GDestroyNotify) gst_event_unref);
2392 priv->current_frame_events = NULL;
2393 g_list_free_full (priv->pending_events, (GDestroyNotify) gst_event_unref);
2394 priv->pending_events = NULL;
2395
2396 priv->error_count = 0;
2397 priv->had_output_data = FALSE;
2398 priv->had_input_data = FALSE;
2399
2400 GST_OBJECT_LOCK (decoder);
2401 priv->earliest_time = GST_CLOCK_TIME_NONE;
2402 priv->proportion = 0.5;
2403 priv->decode_flags_override = FALSE;
2404
2405 priv->request_sync_point_flags = 0;
2406 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
2407 priv->last_force_key_unit_time = GST_CLOCK_TIME_NONE;
2408 GST_OBJECT_UNLOCK (decoder);
2409 priv->distance_from_sync = -1;
2410 }
2411
2412 if (full) {
2413 if (priv->input_state)
2414 gst_aml_video_codec_state_unref (priv->input_state);
2415 priv->input_state = NULL;
2416 GST_OBJECT_LOCK (decoder);
2417 if (priv->output_state)
2418 gst_aml_video_codec_state_unref (priv->output_state);
2419 priv->output_state = NULL;
2420
2421 priv->qos_frame_duration = 0;
2422 GST_OBJECT_UNLOCK (decoder);
2423
2424 if (priv->tags)
2425 gst_tag_list_unref (priv->tags);
2426 priv->tags = NULL;
2427 priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
2428 if (priv->upstream_tags) {
2429 gst_tag_list_unref (priv->upstream_tags);
2430 priv->upstream_tags = NULL;
2431 }
2432 priv->tags_changed = FALSE;
2433 priv->reordered_output = FALSE;
2434
2435 priv->dropped = 0;
2436 priv->processed = 0;
2437
2438 priv->posted_latency_msg = FALSE;
2439
2440 priv->decode_frame_number = 0;
2441 priv->base_picture_number = 0;
2442
2443 if (priv->pool) {
2444 GST_DEBUG_OBJECT (decoder, "deactivate pool %" GST_PTR_FORMAT,
2445 priv->pool);
2446 gst_buffer_pool_set_active (priv->pool, FALSE);
2447 gst_object_unref (priv->pool);
2448 priv->pool = NULL;
2449 }
2450
2451 if (priv->allocator) {
2452 gst_object_unref (priv->allocator);
2453 priv->allocator = NULL;
2454 }
2455 }
2456
2457 priv->discont = TRUE;
2458
2459 priv->base_timestamp = GST_CLOCK_TIME_NONE;
2460 priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
2461 priv->pts_delta = GST_CLOCK_TIME_NONE;
2462
2463 priv->input_offset = 0;
2464 priv->frame_offset = 0;
2465 gst_adapter_clear (priv->input_adapter);
2466 gst_adapter_clear (priv->output_adapter);
2467 g_queue_clear_full (&priv->timestamps, (GDestroyNotify) timestamp_free);
2468
2469 GST_OBJECT_LOCK (decoder);
2470 priv->bytes_out = 0;
2471 priv->time = 0;
2472 GST_OBJECT_UNLOCK (decoder);
2473
2474#ifndef GST_DISABLE_DEBUG
2475 priv->last_reset_time = gst_util_get_timestamp ();
2476#endif
2477
2478 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2479}
2480
2481static GstFlowReturn
2482gst_aml_video_decoder_chain_forward (GstAmlVideoDecoder * decoder,
2483 GstBuffer * buf, gboolean at_eos)
2484{
2485 GstAmlVideoDecoderPrivate *priv;
2486 GstAmlVideoDecoderClass *klass;
2487 GstFlowReturn ret = GST_FLOW_OK;
2488
2489 klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
2490 priv = decoder->priv;
2491
2492 g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);
2493
2494 /* Draining on DISCONT is handled in chain_reverse() for reverse playback,
2495 * and this function would only be called to get everything collected GOP
2496 * by GOP in the parse_gather list */
2497 if (decoder->input_segment.rate > 0.0 && GST_BUFFER_IS_DISCONT (buf)
2498 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2499 ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
2500
2501 if (priv->current_frame == NULL)
2502 priv->current_frame = gst_aml_video_decoder_new_frame (decoder);
2503
2504 if (!priv->packetized)
2505 gst_aml_video_decoder_add_buffer_info (decoder, buf);
2506
2507 priv->input_offset += gst_buffer_get_size (buf);
2508
2509 if (priv->packetized) {
2510 GstAmlVideoCodecFrame *frame;
2511 gboolean was_keyframe = FALSE;
2512
2513 frame = priv->current_frame;
2514
2515 frame->abidata.ABI.num_subframes++;
2516 if (gst_aml_video_decoder_get_subframe_mode (decoder)) {
      /* End the frame once the marker flag is set (or when playing in
       * reverse); otherwise keep it open for further subframes */
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
      if (!GST_BUFFER_FLAG_IS_SET (buf, GST_VIDEO_BUFFER_FLAG_MARKER)
          && (decoder->input_segment.rate > 0.0))
#else
      if (decoder->input_segment.rate > 0.0)
#endif
        priv->current_frame = gst_aml_video_codec_frame_ref (frame);
2525 else
2526 priv->current_frame = NULL;
2527 } else {
2528 priv->current_frame = frame;
2529 }
2530
2531 if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
2532 was_keyframe = TRUE;
2533 GST_DEBUG_OBJECT (decoder, "Marking current_frame as sync point");
2534 GST_AML_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
2535 }
2536
2537 if (frame->input_buffer) {
2538 gst_aml_video_decoder_copy_metas (decoder, frame, frame->input_buffer, buf);
2539 gst_buffer_unref (frame->input_buffer);
2540 }
2541 frame->input_buffer = buf;
2542
2543 if (decoder->input_segment.rate < 0.0) {
2544 priv->parse_gather = g_list_prepend (priv->parse_gather, frame);
2545 priv->current_frame = NULL;
2546 } else {
2547 ret = gst_aml_video_decoder_decode_frame (decoder, frame);
2548 if (!gst_aml_video_decoder_get_subframe_mode (decoder))
2549 priv->current_frame = NULL;
2550 }
2551 /* If in trick mode and it was a keyframe, drain decoder to avoid extra
2552 * latency. Only do this for forwards playback as reverse playback handles
2553 * draining on keyframes in flush_parse(), and would otherwise call back
2554 * from drain_out() to here causing an infinite loop.
2555 * Also this function is only called for reverse playback to gather frames
2556 * GOP by GOP, and does not do any actual decoding. That would be done by
2557 * flush_decode() */
2558 if (ret == GST_FLOW_OK && was_keyframe && decoder->input_segment.rate > 0.0
2559 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2560 ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
2561 } else {
2562 gst_adapter_push (priv->input_adapter, buf);
2563
2564 ret = gst_aml_video_decoder_parse_available (decoder, at_eos, TRUE);
2565 }
2566
2567 if (ret == GST_AML_VIDEO_DECODER_FLOW_NEED_DATA)
2568 return GST_FLOW_OK;
2569
2570 return ret;
2571}
2572
2573static GstFlowReturn
2574gst_aml_video_decoder_flush_decode (GstAmlVideoDecoder * dec)
2575{
2576 GstAmlVideoDecoderPrivate *priv = dec->priv;
2577 GstFlowReturn res = GST_FLOW_OK;
2578 GList *walk;
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
  GstAmlVideoCodecFrame *current_frame = NULL;
  gboolean last_subframe;
#endif
  GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
2584
2585 walk = priv->decode;
2586 while (walk) {
2587 GList *next;
2588 GstAmlVideoCodecFrame *frame = (GstAmlVideoCodecFrame *) (walk->data);
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
    last_subframe = TRUE;
#endif
    /* In subframe mode, we need to get rid of the intermediary frames
     * created during the buffer gather stage. That's why we keep a current
     * frame as the main frame and drop all subsequent frames until the end
     * of the subframes batch. */
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
    if (gst_aml_video_decoder_get_subframe_mode (dec)) {
2599 if (current_frame == NULL) {
2600 current_frame = gst_aml_video_codec_frame_ref (frame);
2601 } else {
2602 if (current_frame->input_buffer) {
2603 gst_aml_video_decoder_copy_metas (dec, current_frame,
2604 current_frame->input_buffer, current_frame->output_buffer);
2605 gst_buffer_unref (current_frame->input_buffer);
2606 }
2607 current_frame->input_buffer = gst_buffer_ref (frame->input_buffer);
2608 gst_aml_video_codec_frame_unref (frame);
2609 }
2610 last_subframe = GST_BUFFER_FLAG_IS_SET (current_frame->input_buffer,
2611 GST_VIDEO_BUFFER_FLAG_MARKER);
2612 } else {
2613 current_frame = frame;
2614 }
#endif

2617 GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
2618 ", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
2619 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2620 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2621
2622 next = walk->next;
2623
2624 priv->decode = g_list_delete_link (priv->decode, walk);
2625
2626 /* decode buffer, resulting data prepended to queue */
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
    res = gst_aml_video_decoder_decode_frame (dec, current_frame);
#else
    res = gst_aml_video_decoder_decode_frame (dec, frame);
#endif
    if (res != GST_FLOW_OK)
      break;
#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
    if (!gst_aml_video_decoder_get_subframe_mode (dec)
        || last_subframe)
      current_frame = NULL;
#endif
    walk = next;
2640 }
2641
2642 return res;
2643}
2644
/* gst_aml_video_decoder_flush_parse is called from the
 * chain_reverse() function when a buffer arrives carrying
 * a DISCONT - indicating that reverse playback looped back
 * to the next data block, and that therefore all available
 * data should be fed through the decoder and the resulting
 * frames gathered for reversed output
 */
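/* As a rough illustration (reverse playback of one GOP K B1 B2 B3): the
 * buffers are gathered newest-first, re-reversed and parsed into frames,
 * decoding then runs forward from the keyframe K, and the queued output
 * buffers are finally pushed so that downstream sees them in reverse
 * presentation order. */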
2652static GstFlowReturn
2653gst_aml_video_decoder_flush_parse (GstAmlVideoDecoder * dec, gboolean at_eos)
2654{
2655 GstAmlVideoDecoderPrivate *priv = dec->priv;
2656 GstFlowReturn res = GST_FLOW_OK;
2657 GList *walk;
2658 GstAmlVideoDecoderClass *decoder_class;
2659
2660 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (dec);
2661
2662 GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");
2663
2664 /* Reverse the gather list, and prepend it to the parse list,
2665 * then flush to parse whatever we can */
2666 priv->gather = g_list_reverse (priv->gather);
2667 priv->parse = g_list_concat (priv->gather, priv->parse);
2668 priv->gather = NULL;
2669
2670 /* clear buffer and decoder state */
2671 gst_aml_video_decoder_flush (dec, FALSE);
2672
2673 walk = priv->parse;
2674 while (walk) {
2675 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2676 GList *next = walk->next;
2677
2678 GST_DEBUG_OBJECT (dec, "parsing buffer %p, PTS %" GST_TIME_FORMAT
2679 ", DTS %" GST_TIME_FORMAT " flags %x", buf,
2680 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2681 GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_BUFFER_FLAGS (buf));
2682
2683 /* parse buffer, resulting frames prepended to parse_gather queue */
2684 gst_buffer_ref (buf);
2685 res = gst_aml_video_decoder_chain_forward (dec, buf, at_eos);
2686
2687 /* if we generated output, we can discard the buffer, else we
2688 * keep it in the queue */
2689 if (priv->parse_gather) {
2690 GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
2691 priv->parse = g_list_delete_link (priv->parse, walk);
2692 gst_buffer_unref (buf);
2693 } else {
2694 GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
2695 }
2696 walk = next;
2697 }
2698
2699 walk = priv->parse_gather;
2700 while (walk) {
2701 GstAmlVideoCodecFrame *frame = (GstAmlVideoCodecFrame *) (walk->data);
2702 GList *walk2;
2703
2704 /* this is reverse playback, check if we need to apply some segment
2705 * to the output before decoding, as during decoding the segment.rate
2706 * must be used to determine if a buffer should be pushed or added to
2707 * the output list for reverse pushing.
2708 *
2709 * The new segment is not immediately pushed here because we must
2710 * wait for negotiation to happen before it can be pushed to avoid
2711 * pushing a segment before caps event. Negotiation only happens
2712 * when finish_frame is called.
2713 */
2714 for (walk2 = frame->events; walk2;) {
2715 GList *cur = walk2;
2716 GstEvent *event = walk2->data;
2717
2718 walk2 = g_list_next (walk2);
2719 if (GST_EVENT_TYPE (event) <= GST_EVENT_SEGMENT) {
2720
2721 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
2722 GstSegment segment;
2723
2724 GST_DEBUG_OBJECT (dec, "Segment at frame %p %" GST_TIME_FORMAT,
2725 frame, GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)));
2726 gst_event_copy_segment (event, &segment);
2727 if (segment.format == GST_FORMAT_TIME) {
2728 dec->output_segment = segment;
2729 dec->priv->in_out_segment_sync =
2730 gst_segment_is_equal (&dec->input_segment, &segment);
2731 }
2732 }
2733 dec->priv->pending_events =
2734 g_list_append (dec->priv->pending_events, event);
2735 frame->events = g_list_delete_link (frame->events, cur);
2736 }
2737 }
2738
2739 walk = walk->next;
2740 }
2741
  /* now we can process frames. Start by moving each frame from the parse_gather
   * to the decode list, reversing the order as we go, and stopping when/if we
   * copy a keyframe. */
2745 GST_DEBUG_OBJECT (dec, "checking parsed frames for a keyframe to decode");
2746 walk = priv->parse_gather;
2747 while (walk) {
2748 GstAmlVideoCodecFrame *frame = (GstAmlVideoCodecFrame *) (walk->data);
2749
2750 /* remove from the gather list */
2751 priv->parse_gather = g_list_remove_link (priv->parse_gather, walk);
2752
2753 /* move it to the front of the decode queue */
2754 priv->decode = g_list_concat (walk, priv->decode);
2755
2756 /* if we copied a keyframe, flush and decode the decode queue */
2757 if (GST_AML_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
2758 GST_DEBUG_OBJECT (dec, "found keyframe %p with PTS %" GST_TIME_FORMAT
2759 ", DTS %" GST_TIME_FORMAT, frame,
2760 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2761 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2762 res = gst_aml_video_decoder_flush_decode (dec);
2763 if (res != GST_FLOW_OK)
2764 goto done;
2765
2766 /* We need to tell the subclass to drain now.
2767 * We prefer the drain vfunc, but for backward-compat
2768 * we use a finish() vfunc if drain isn't implemented */
2769 if (decoder_class->drain) {
2770 GST_DEBUG_OBJECT (dec, "Draining");
2771 res = decoder_class->drain (dec);
2772 } else if (decoder_class->finish) {
2773 GST_FIXME_OBJECT (dec, "Sub-class should implement drain(). "
2774 "Calling finish() for backwards-compat");
2775 res = decoder_class->finish (dec);
2776 }
2777
2778 if (res != GST_FLOW_OK)
2779 goto done;
2780
2781 /* now send queued data downstream */
2782 walk = priv->output_queued;
2783 while (walk) {
2784 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2785
2786 priv->output_queued =
2787 g_list_delete_link (priv->output_queued, priv->output_queued);
2788
2789 if (G_LIKELY (res == GST_FLOW_OK)) {
2790 /* avoid stray DISCONT from forward processing,
2791 * which have no meaning in reverse pushing */
2792 GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
2793
2794 /* Last chance to calculate a timestamp as we loop backwards
2795 * through the list */
2796 if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE)
2797 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2798 else if (priv->last_timestamp_out != GST_CLOCK_TIME_NONE &&
2799 GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
2800 GST_BUFFER_TIMESTAMP (buf) =
2801 priv->last_timestamp_out - GST_BUFFER_DURATION (buf);
2802 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2803 GST_LOG_OBJECT (dec,
2804 "Calculated TS %" GST_TIME_FORMAT " working backwards",
2805 GST_TIME_ARGS (priv->last_timestamp_out));
2806 }
2807
2808 res = gst_aml_video_decoder_clip_and_push_buf (dec, buf);
2809 } else {
2810 gst_buffer_unref (buf);
2811 }
2812
2813 walk = priv->output_queued;
2814 }
2815
2816 /* clear buffer and decoder state again
2817 * before moving to the previous keyframe */
2818 gst_aml_video_decoder_flush (dec, FALSE);
2819 }
2820
2821 walk = priv->parse_gather;
2822 }
2823
2824done:
2825 return res;
2826}
2827
2828static GstFlowReturn
2829gst_aml_video_decoder_chain_reverse (GstAmlVideoDecoder * dec, GstBuffer * buf)
2830{
2831 GstAmlVideoDecoderPrivate *priv = dec->priv;
2832 GstFlowReturn result = GST_FLOW_OK;
2833
2834 /* if we have a discont, move buffers to the decode list */
2835 if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
2836 GST_DEBUG_OBJECT (dec, "received discont");
2837
2838 /* parse and decode stuff in the gather and parse queues */
2839 result = gst_aml_video_decoder_flush_parse (dec, FALSE);
2840 }
2841
2842 if (G_LIKELY (buf)) {
2843 GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2844 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
2845 GST_TIME_FORMAT, buf, gst_buffer_get_size (buf),
2846 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2847 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2848 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2849
2850 /* add buffer to gather queue */
2851 priv->gather = g_list_prepend (priv->gather, buf);
2852 }
2853
2854 return result;
2855}
2856
2857static GstFlowReturn
2858gst_aml_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
2859{
2860 GstAmlVideoDecoder *decoder;
2861 GstFlowReturn ret = GST_FLOW_OK;
2862
2863 decoder = GST_AML_VIDEO_DECODER (parent);
2864
2865 if (G_UNLIKELY (!decoder->priv->input_state && decoder->priv->needs_format))
2866 goto not_negotiated;
2867
2868 GST_LOG_OBJECT (decoder,
2869 "AML chain PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT " duration %"
2870 GST_TIME_FORMAT " size %" G_GSIZE_FORMAT " flags %x",
2871 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2872 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2873 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
2874 gst_buffer_get_size (buf), GST_BUFFER_FLAGS (buf));
2875
2876 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
2877
2878 /* NOTE:
2879 * requiring the pad to be negotiated makes it impossible to use
2880 * oggdemux or filesrc ! decoder */
2881
2882 if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
2883 GstEvent *event;
2884 GstSegment *segment = &decoder->input_segment;
2885
2886 GST_WARNING_OBJECT (decoder,
2887 "Received buffer without a new-segment. "
2888 "Assuming timestamps start from 0.");
2889
2890 gst_segment_init (segment, GST_FORMAT_TIME);
2891
2892 event = gst_event_new_segment (segment);
2893
2894 decoder->priv->current_frame_events =
2895 g_list_prepend (decoder->priv->current_frame_events, event);
2896 }
2897
2898 decoder->priv->had_input_data = TRUE;
2899
2900 if (decoder->input_segment.rate > 0.0)
2901 ret = gst_aml_video_decoder_chain_forward (decoder, buf, FALSE);
2902 else
2903 ret = gst_aml_video_decoder_chain_reverse (decoder, buf);
2904
2905 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2906 return ret;
2907
2908 /* ERRORS */
2909not_negotiated:
2910 {
2911 GST_ELEMENT_ERROR (decoder, CORE, NEGOTIATION, (NULL),
2912 ("decoder not initialized"));
2913 gst_buffer_unref (buf);
2914 return GST_FLOW_NOT_NEGOTIATED;
2915 }
2916}
2917
2918static GstStateChangeReturn
2919gst_aml_video_decoder_change_state (GstElement * element, GstStateChange transition)
2920{
2921 GstAmlVideoDecoder *decoder;
2922 GstAmlVideoDecoderClass *decoder_class;
2923 GstStateChangeReturn ret;
2924
2925 decoder = GST_AML_VIDEO_DECODER (element);
2926 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (element);
2927
2928 switch (transition) {
2929 case GST_STATE_CHANGE_NULL_TO_READY:
2930 /* open device/library if needed */
2931 if (decoder_class->open && !decoder_class->open (decoder))
2932 goto open_failed;
2933 break;
2934 case GST_STATE_CHANGE_READY_TO_PAUSED:
2935 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
2936 gst_aml_video_decoder_reset (decoder, TRUE, TRUE);
2937 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2938
2939 /* Initialize device/library if needed */
2940 if (decoder_class->start && !decoder_class->start (decoder))
2941 goto start_failed;
2942 break;
2943 default:
2944 break;
2945 }
2946
2947 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
2948
2949 switch (transition) {
2950 case GST_STATE_CHANGE_PAUSED_TO_READY:{
2951 gboolean stopped = TRUE;
2952
2953 if (decoder_class->stop)
2954 stopped = decoder_class->stop (decoder);
2955
2956 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
2957 gst_aml_video_decoder_reset (decoder, TRUE, TRUE);
2958 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2959
2960 if (!stopped)
2961 goto stop_failed;
2962
2963 break;
2964 }
2965 case GST_STATE_CHANGE_READY_TO_NULL:
2966 /* close device/library if needed */
2967 if (decoder_class->close && !decoder_class->close (decoder))
2968 goto close_failed;
2969 break;
2970 default:
2971 break;
2972 }
2973
2974 return ret;
2975
2976 /* Errors */
2977open_failed:
2978 {
2979 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2980 ("Failed to open decoder"));
2981 return GST_STATE_CHANGE_FAILURE;
2982 }
2983
2984start_failed:
2985 {
2986 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2987 ("Failed to start decoder"));
2988 return GST_STATE_CHANGE_FAILURE;
2989 }
2990
2991stop_failed:
2992 {
2993 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2994 ("Failed to stop decoder"));
2995 return GST_STATE_CHANGE_FAILURE;
2996 }
2997
2998close_failed:
2999 {
3000 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
3001 ("Failed to close decoder"));
3002 return GST_STATE_CHANGE_FAILURE;
3003 }
3004}
3005
3006static GstAmlVideoCodecFrame *
3007gst_aml_video_decoder_new_frame (GstAmlVideoDecoder * decoder)
3008{
3009 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3010 GstAmlVideoCodecFrame *frame;
3011
3012 frame = g_slice_new0 (GstAmlVideoCodecFrame);
3013
3014 frame->ref_count = 1;
3015
3016 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3017 frame->system_frame_number = priv->system_frame_number;
3018 priv->system_frame_number++;
3019 frame->decode_frame_number = priv->decode_frame_number;
3020 priv->decode_frame_number++;
3021
3022 frame->dts = GST_CLOCK_TIME_NONE;
3023 frame->pts = GST_CLOCK_TIME_NONE;
3024 frame->duration = GST_CLOCK_TIME_NONE;
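  /* the new frame takes ownership of all serialized events received since
   * the previous frame; they are pushed downstream when it is finished */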
3025 frame->events = priv->current_frame_events;
3026 priv->current_frame_events = NULL;
3027
3028 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3029
3030 GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
3031 frame, frame->system_frame_number);
3032
3033 return frame;
3034}
3035
3036static void
3037gst_aml_video_decoder_push_event_list (GstAmlVideoDecoder * decoder, GList * events)
3038{
3039 GList *l;
3040
3041 /* events are stored in reverse order */
3042 for (l = g_list_last (events); l; l = g_list_previous (l)) {
3043 GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
3044 gst_aml_video_decoder_push_event (decoder, l->data);
3045 }
3046 g_list_free (events);
3047}
3048
3049static void
3050gst_aml_video_decoder_prepare_finish_frame (GstAmlVideoDecoder *
3051 decoder, GstAmlVideoCodecFrame * frame, gboolean dropping)
3052{
3053 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3054 GList *l, *events = NULL;
3055 gboolean sync;
3056
  /* The decoder may have output decoded data that was then dropped because
   * its PTS was below segment.start, so mark had_output_data here. */
  priv->had_output_data = TRUE;

#ifndef GST_DISABLE_GST_DEBUG
3062 GST_LOG_OBJECT (decoder, "n %d in %" G_GSIZE_FORMAT " out %" G_GSIZE_FORMAT,
3063 priv->frames.length,
3064 gst_adapter_available (priv->input_adapter),
3065 gst_adapter_available (priv->output_adapter));
3066#endif
3067
3068 sync = GST_AML_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);
3069
3070 GST_LOG_OBJECT (decoder,
3071 "finish frame %p (#%d)(sub=#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
3072 GST_TIME_FORMAT,
3073 frame, frame->system_frame_number, frame->abidata.ABI.num_subframes,
3074 sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));
3075
3076 /* Push all pending events that arrived before this frame */
3077 for (l = priv->frames.head; l; l = l->next) {
3078 GstAmlVideoCodecFrame *tmp = l->data;
3079
3080 if (tmp->events) {
3081 events = g_list_concat (tmp->events, events);
3082 tmp->events = NULL;
3083 }
3084
3085 if (tmp == frame)
3086 break;
3087 }
3088
3089 if (dropping || !decoder->priv->output_state) {
3090 /* Push before the next frame that is not dropped */
3091 decoder->priv->pending_events =
3092 g_list_concat (events, decoder->priv->pending_events);
3093 } else {
3094 gst_aml_video_decoder_push_event_list (decoder, decoder->priv->pending_events);
3095 decoder->priv->pending_events = NULL;
3096
3097 gst_aml_video_decoder_push_event_list (decoder, events);
3098 }
3099
  /* Check if the data should not be displayed. For example an altref/invisible
   * frame in VP8. In this case we should not update the timestamps. */
3102 if (GST_AML_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
3103 return;
3104
3105 /* If the frame is meant to be output but we don't have an output_buffer
3106 * we have a problem :) */
3107 if (G_UNLIKELY ((frame->output_buffer == NULL) && !dropping))
3108 goto no_output_buffer;
3109
3110 if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
3111 if (frame->pts != priv->base_timestamp) {
3112 GST_DEBUG_OBJECT (decoder,
3113 "sync timestamp %" GST_TIME_FORMAT " diff %" GST_STIME_FORMAT,
3114 GST_TIME_ARGS (frame->pts),
3115 GST_STIME_ARGS (GST_CLOCK_DIFF (frame->pts,
3116 decoder->output_segment.start)));
3117 priv->base_timestamp = frame->pts;
3118 priv->base_picture_number = frame->decode_frame_number;
3119 }
3120 }
3121
3122 if (frame->duration == GST_CLOCK_TIME_NONE) {
3123 frame->duration = gst_aml_video_decoder_get_frame_duration (decoder, frame);
3124 GST_LOG_OBJECT (decoder,
3125 "Guessing duration %" GST_TIME_FORMAT " for frame...",
3126 GST_TIME_ARGS (frame->duration));
3127 }
3128
  /* PTS is expected to be monotonically ascending,
   * so a good guess is the lowest unsent DTS */
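  /* e.g. if frames with DTS 0/40/80 ms are still pending and the current
   * frame has no PTS, 0 ms (the lowest unsent DTS) plus the known
   * DTS-to-PTS delta is taken as the PTS guess below */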
3131 {
3132 GstClockTime min_ts = GST_CLOCK_TIME_NONE;
3133 GstAmlVideoCodecFrame *oframe = NULL;
3134 gboolean seen_none = FALSE;
3135
3136 /* some maintenance regardless */
3137 for (l = priv->frames.head; l; l = l->next) {
3138 GstAmlVideoCodecFrame *tmp = l->data;
3139
3140 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
3141 seen_none = TRUE;
3142 continue;
3143 }
3144
3145 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
3146 min_ts = tmp->abidata.ABI.ts;
3147 oframe = tmp;
3148 }
3149 }
3150 /* save a ts if needed */
3151 if (oframe && oframe != frame) {
3152 oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
3153 }
3154
3155 /* and set if needed;
3156 * valid delta means we have reasonable DTS input */
    /* also, if we ended up reordered, it means this approach conflicts
     * with some sparse existing PTS, and so it does not work out */
3159 if (!priv->reordered_output &&
3160 !GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none &&
3161 GST_CLOCK_TIME_IS_VALID (priv->pts_delta)) {
3162 frame->pts = min_ts + priv->pts_delta;
3163 GST_DEBUG_OBJECT (decoder,
3164 "no valid PTS, using oldest DTS %" GST_TIME_FORMAT,
3165 GST_TIME_ARGS (frame->pts));
3166 }
3167
3168 /* some more maintenance, ts2 holds PTS */
3169 min_ts = GST_CLOCK_TIME_NONE;
3170 seen_none = FALSE;
3171 for (l = priv->frames.head; l; l = l->next) {
3172 GstAmlVideoCodecFrame *tmp = l->data;
3173
3174 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts2)) {
3175 seen_none = TRUE;
3176 continue;
3177 }
3178
3179 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts2 < min_ts) {
3180 min_ts = tmp->abidata.ABI.ts2;
3181 oframe = tmp;
3182 }
3183 }
3184 /* save a ts if needed */
3185 if (oframe && oframe != frame) {
3186 oframe->abidata.ABI.ts2 = frame->abidata.ABI.ts2;
3187 }
3188
3189 /* if we detected reordered output, then PTS are void,
3190 * however those were obtained; bogus input, subclass etc */
3191 if (priv->reordered_output && !seen_none) {
3192 GST_DEBUG_OBJECT (decoder, "invalidating PTS");
3193 frame->pts = GST_CLOCK_TIME_NONE;
3194 }
3195
3196 if (!GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none) {
3197 frame->pts = min_ts;
3198 GST_DEBUG_OBJECT (decoder,
3199 "no valid PTS, using oldest PTS %" GST_TIME_FORMAT,
3200 GST_TIME_ARGS (frame->pts));
3201 }
3202 }
3203
3204
3205 if (frame->pts == GST_CLOCK_TIME_NONE) {
3206 /* Last ditch timestamp guess: Just add the duration to the previous
3207 * frame. If it's the first frame, just use the segment start. */
3208 if (frame->duration != GST_CLOCK_TIME_NONE) {
3209 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out))
3210 frame->pts = priv->last_timestamp_out + frame->duration;
3211 else if (frame->dts != GST_CLOCK_TIME_NONE) {
3212 frame->pts = frame->dts;
3213 GST_LOG_OBJECT (decoder,
3214 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
3215 GST_TIME_ARGS (frame->pts));
3216 } else if (decoder->output_segment.rate > 0.0)
3217 frame->pts = decoder->output_segment.start;
3218 GST_INFO_OBJECT (decoder,
3219 "Guessing PTS=%" GST_TIME_FORMAT " for frame... DTS=%"
3220 GST_TIME_FORMAT, GST_TIME_ARGS (frame->pts),
3221 GST_TIME_ARGS (frame->dts));
3222 } else if (sync && frame->dts != GST_CLOCK_TIME_NONE) {
3223 frame->pts = frame->dts;
3224 GST_LOG_OBJECT (decoder,
3225 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
3226 GST_TIME_ARGS (frame->pts));
3227 }
3228 }
3229
3230 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
3231 if (frame->pts < priv->last_timestamp_out) {
3232 GST_WARNING_OBJECT (decoder,
3233 "decreasing timestamp (%" GST_TIME_FORMAT " < %"
3234 GST_TIME_FORMAT ")",
3235 GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
3236 priv->reordered_output = TRUE;
3237 /* make it a bit less weird downstream */
3238 frame->pts = priv->last_timestamp_out;
3239 }
3240 }
3241
3242 if (GST_CLOCK_TIME_IS_VALID (frame->pts))
3243 priv->last_timestamp_out = frame->pts;
3244
3245 return;
3246
3247 /* ERRORS */
3248no_output_buffer:
3249 {
    GST_ERROR_OBJECT (decoder, "No buffer to output!");
3251 }
3252}
3253
3254/**
3255 * gst_aml_video_decoder_release_frame:
3256 * @dec: a #GstAmlVideoDecoder
3257 * @frame: (transfer full): the #GstAmlVideoCodecFrame to release
3258 *
3259 * Similar to gst_aml_video_decoder_drop_frame(), but simply releases @frame
3260 * without any processing other than removing it from list of pending frames,
3261 * after which it is considered finished and released.
3262 *
3263 * Since: 1.2.2
3264 */
3265void
3266gst_aml_video_decoder_release_frame (GstAmlVideoDecoder * dec,
3267 GstAmlVideoCodecFrame * frame)
3268{
3269 GList *link;
3270
3271 /* unref once from the list */
3272 GST_AML_VIDEO_DECODER_STREAM_LOCK (dec);
3273 link = g_queue_find (&dec->priv->frames, frame);
3274 if (link) {
3275 gst_aml_video_codec_frame_unref (frame);
3276 g_queue_delete_link (&dec->priv->frames, link);
3277 }
3278 if (frame->events) {
3279 dec->priv->pending_events =
3280 g_list_concat (frame->events, dec->priv->pending_events);
3281 frame->events = NULL;
3282 }
3283 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (dec);
3284
3285 /* unref because this function takes ownership */
3286 gst_aml_video_codec_frame_unref (frame);
3287}
3288
3289/* called with STREAM_LOCK */
3290static void
3291gst_aml_video_decoder_post_qos_drop (GstAmlVideoDecoder * dec, GstClockTime timestamp)
3292{
3293 GstClockTime stream_time, jitter, earliest_time, qostime;
3294 GstSegment *segment;
3295 GstMessage *qos_msg;
3296 gdouble proportion;
3297 dec->priv->dropped++;
3298
3299 /* post QoS message */
3300 GST_OBJECT_LOCK (dec);
3301 proportion = dec->priv->proportion;
3302 earliest_time = dec->priv->earliest_time;
3303 GST_OBJECT_UNLOCK (dec);
3304
3305 segment = &dec->output_segment;
3306 if (G_UNLIKELY (segment->format == GST_FORMAT_UNDEFINED))
3307 segment = &dec->input_segment;
3308 stream_time =
3309 gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
3310 qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
3311 jitter = GST_CLOCK_DIFF (qostime, earliest_time);
3312 qos_msg =
3313 gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
3314 timestamp, GST_CLOCK_TIME_NONE);
3315 gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
3316 gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
3317 dec->priv->processed, dec->priv->dropped);
3318 gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
3319}
3320
3321/**
3322 * gst_aml_video_decoder_drop_frame:
3323 * @dec: a #GstAmlVideoDecoder
3324 * @frame: (transfer full): the #GstAmlVideoCodecFrame to drop
3325 *
3326 * Similar to gst_aml_video_decoder_finish_frame(), but drops @frame in any
3327 * case and posts a QoS message with the frame's details on the bus.
3328 * In any case, the frame is considered finished and released.
3329 *
3330 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3331 */
3332GstFlowReturn
3333gst_aml_video_decoder_drop_frame (GstAmlVideoDecoder * dec, GstAmlVideoCodecFrame * frame)
3334{
3335 GST_LOG_OBJECT (dec, "drop frame %p", frame);
3336
3337 if (gst_aml_video_decoder_get_subframe_mode (dec))
3338 GST_DEBUG_OBJECT (dec, "Drop subframe %d. Must be the last one.",
3339 frame->abidata.ABI.num_subframes);
3340
3341 GST_AML_VIDEO_DECODER_STREAM_LOCK (dec);
3342
3343 gst_aml_video_decoder_prepare_finish_frame (dec, frame, TRUE);
3344
3345 GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
3346 GST_TIME_ARGS (frame->pts));
3347
3348 gst_aml_video_decoder_post_qos_drop (dec, frame->pts);
3349
3350 /* now free the frame */
3351 gst_aml_video_decoder_release_frame (dec, frame);
3352
3353 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (dec);
3354
3355 return GST_FLOW_OK;
3356}
3357
3358/**
3359 * gst_aml_video_decoder_drop_subframe:
3360 * @dec: a #GstAmlVideoDecoder
3361 * @frame: (transfer full): the #GstAmlVideoCodecFrame
3362 *
3363 * Drops input data.
3364 * The frame is not considered finished until the whole frame
3365 * is finished or dropped by the subclass.
3366 *
3367 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3368 *
3369 * Since: 1.20
3370 */
3371GstFlowReturn
3372gst_aml_video_decoder_drop_subframe (GstAmlVideoDecoder * dec,
3373 GstAmlVideoCodecFrame * frame)
3374{
3375 g_return_val_if_fail (gst_aml_video_decoder_get_subframe_mode (dec),
3376 GST_FLOW_NOT_SUPPORTED);
3377
3378 GST_LOG_OBJECT (dec, "drop subframe %p num=%d", frame->input_buffer,
3379 gst_aml_video_decoder_get_input_subframe_index (dec, frame));
3380
3381 GST_AML_VIDEO_DECODER_STREAM_LOCK (dec);
3382
3383 gst_aml_video_codec_frame_unref (frame);
3384
3385 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (dec);
3386
3387 return GST_FLOW_OK;
3388}
3389
3390static gboolean
3391gst_aml_video_decoder_transform_meta_default (GstAmlVideoDecoder *
3392 decoder, GstAmlVideoCodecFrame * frame, GstMeta * meta)
3393{
3394 const GstMetaInfo *info = meta->info;
3395 const gchar *const *tags;
3396 const gchar *const supported_tags[] = {
3397 GST_META_TAG_VIDEO_STR,
3398 GST_META_TAG_VIDEO_ORIENTATION_STR,
3399 GST_META_TAG_VIDEO_SIZE_STR,
3400 NULL,
3401 };
3402
3403 tags = gst_meta_api_type_get_tags (info->api);
3404
3405 if (!tags)
3406 return TRUE;
3407
3408 while (*tags) {
3409 if (!g_strv_contains (supported_tags, *tags))
3410 return FALSE;
3411 tags++;
3412 }
3413
3414 return TRUE;
3415}
3416
3417typedef struct
3418{
3419 GstAmlVideoDecoder *decoder;
3420 GstAmlVideoCodecFrame *frame;
3421 GstBuffer *buffer;
3422} CopyMetaData;
3423
3424static gboolean
3425foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
3426{
3427 CopyMetaData *data = user_data;
3428 GstAmlVideoDecoder *decoder = data->decoder;
3429 GstAmlVideoDecoderClass *klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
3430 GstAmlVideoCodecFrame *frame = data->frame;
3431 GstBuffer *buffer = data->buffer;
3432 const GstMetaInfo *info = (*meta)->info;
3433 gboolean do_copy = FALSE;
3434
3435#if ((GST_VERSION_MAJOR == 1) && (GST_VERSION_MINOR >= 18))
3436  if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)
3437      || gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory_reference)) {
3438#else
3439  if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
3440#endif
3441    /* never call the transform_meta with memory specific metadata */
3442 GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
3443 g_type_name (info->api));
3444 do_copy = FALSE;
3445 } else if (klass->transform_meta) {
3446 do_copy = klass->transform_meta (decoder, frame, *meta);
3447 GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
3448 g_type_name (info->api), do_copy);
3449 }
3450
3451 /* we only copy metadata when the subclass implemented a transform_meta
3452 * function and when it returns %TRUE */
3453 if (do_copy && info->transform_func) {
3454 GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
3455 GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
3456 /* simply copy then */
3457
3458 info->transform_func (buffer, *meta, inbuf, _gst_meta_transform_copy,
3459 &copy_data);
3460 }
3461 return TRUE;
3462}
3463
3464static void
3465gst_aml_video_decoder_copy_metas (GstAmlVideoDecoder * decoder,
3466 GstAmlVideoCodecFrame * frame, GstBuffer * src_buffer, GstBuffer * dest_buffer)
3467{
3468 GstAmlVideoDecoderClass *decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
3469
3470 if (decoder_class->transform_meta) {
3471 if (G_LIKELY (frame)) {
3472 CopyMetaData data;
3473
3474 data.decoder = decoder;
3475 data.frame = frame;
3476 data.buffer = dest_buffer;
3477 gst_buffer_foreach_meta (src_buffer, foreach_metadata, &data);
3478 } else {
3479 GST_WARNING_OBJECT (decoder,
3480 "Can't copy metadata because input frame disappeared");
3481 }
3482 }
3483}
3484
3485/**
3486 * gst_aml_video_decoder_finish_frame:
3487 * @decoder: a #GstAmlVideoDecoder
3488 * @frame: (transfer full): a decoded #GstAmlVideoCodecFrame
3489 *
3490 * @frame should have a valid decoded data buffer, whose metadata fields
3491 * are then appropriately set according to frame data and pushed downstream.
3492 * If no output data is provided, @frame is considered skipped.
3493 * In any case, the frame is considered finished and released.
3494 *
3495 * After calling this function the output buffer of the frame is to be
3496 * considered read-only. This function will also change the metadata
3497 * of the buffer.
3498 *
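 * A minimal sketch of the tail of a subclass's @handle_frame
 * implementation; my_codec_decode_into() stands in for the subclass's
 * actual decoding routine and is not part of this API:
 * |[<!-- language="C" -->
 * ret = gst_aml_video_decoder_allocate_output_frame (decoder, frame);
 * if (ret != GST_FLOW_OK)
 *   return ret;
 * if (!my_codec_decode_into (frame->input_buffer, frame->output_buffer))
 *   return gst_aml_video_decoder_drop_frame (decoder, frame);
 * return gst_aml_video_decoder_finish_frame (decoder, frame);
 * ]|
 *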
3499 * Returns: a #GstFlowReturn resulting from sending data downstream
3500 */
3501GstFlowReturn
3502gst_aml_video_decoder_finish_frame (GstAmlVideoDecoder * decoder,
3503 GstAmlVideoCodecFrame * frame)
3504{
3505 GstFlowReturn ret = GST_FLOW_OK;
3506 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3507 GstBuffer *output_buffer;
3508 gboolean needs_reconfigure = FALSE;
3509
3510 GST_LOG_OBJECT (decoder, "finish frame %p", frame);
3511
3512 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3513
3514 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
3515 if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
3516 && needs_reconfigure))) {
3517 if (!gst_aml_video_decoder_negotiate_unlocked (decoder)) {
3518 gst_pad_mark_reconfigure (decoder->srcpad);
3519 if (GST_PAD_IS_FLUSHING (decoder->srcpad))
3520 ret = GST_FLOW_FLUSHING;
3521 else
3522 ret = GST_FLOW_NOT_NEGOTIATED;
3523 goto done;
3524 }
3525 }
3526
3527 gst_aml_video_decoder_prepare_finish_frame (decoder, frame, FALSE);
3528 priv->processed++;
3529
3530 if (priv->tags_changed) {
3531 GstEvent *tags_event;
3532
3533 tags_event = gst_aml_video_decoder_create_merged_tags_event (decoder);
3534
3535 if (tags_event != NULL)
3536 gst_aml_video_decoder_push_event (decoder, tags_event);
3537
3538 priv->tags_changed = FALSE;
3539 }
3540
3541 /* no buffer data means this frame is skipped */
3542 if (!frame->output_buffer || GST_AML_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
3543 GST_DEBUG_OBJECT (decoder,
3544        "skipping frame %" GST_TIME_FORMAT " because no output was produced",
3545 GST_TIME_ARGS (frame->pts));
3546 goto done;
3547 }
3548
3549 /* Mark output as corrupted if the subclass requested so and we're either
3550 * still before the sync point after the request, or we don't even know the
3551 * frame number of the sync point yet (it is 0) */
3552 GST_OBJECT_LOCK (decoder);
3553 if (frame->system_frame_number <= priv->request_sync_point_frame_number
3554 && priv->request_sync_point_frame_number != REQUEST_SYNC_POINT_UNSET) {
3555 if (priv->request_sync_point_flags &
3556 GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT) {
3557 GST_DEBUG_OBJECT (decoder,
3558 "marking frame %" GST_TIME_FORMAT
3559 " as corrupted because it is still before the sync point",
3560 GST_TIME_ARGS (frame->pts));
3561 GST_AML_VIDEO_CODEC_FRAME_FLAG_SET (frame,
3562 GST_AML_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
3563 }
3564 } else {
3565 /* Reset to -1 to mark it as unset now that we've reached the frame */
3566 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
3567 }
3568 GST_OBJECT_UNLOCK (decoder);
3569
3570 if (priv->discard_corrupted_frames
3571 && (GST_AML_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3572 GST_AML_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)
3573 || GST_BUFFER_FLAG_IS_SET (frame->output_buffer,
3574 GST_BUFFER_FLAG_CORRUPTED))) {
3575 GST_DEBUG_OBJECT (decoder,
3576 "skipping frame %" GST_TIME_FORMAT " because it is corrupted",
3577 GST_TIME_ARGS (frame->pts));
3578 goto done;
3579 }
3580
3581 /* We need a writable buffer for the metadata changes below */
3582 output_buffer = frame->output_buffer =
3583 gst_buffer_make_writable (frame->output_buffer);
3584
3585 GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
3586
3587 GST_BUFFER_PTS (output_buffer) = frame->pts;
3588 GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
3589 GST_BUFFER_DURATION (output_buffer) = frame->duration;
3590
3591 GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
3592 GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;
3593
3594 if (priv->discont) {
3595 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
3596 }
3597
3598 if (GST_AML_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3599 GST_AML_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)) {
3600 GST_DEBUG_OBJECT (decoder,
3601 "marking frame %" GST_TIME_FORMAT " as corrupted",
3602 GST_TIME_ARGS (frame->pts));
3603 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_CORRUPTED);
3604 }
3605  if (frame->input_buffer) {
3606 gst_aml_video_decoder_copy_metas (decoder, frame, frame->input_buffer,
3607 frame->output_buffer);
3608 }
3609
3610 /* Get an additional ref to the buffer, which is going to be pushed
3611 * downstream, the original ref is owned by the frame
3612 */
3613 output_buffer = gst_buffer_ref (output_buffer);
3614
3615 /* Release frame so the buffer is writable when we push it downstream
3616 * if possible, i.e. if the subclass does not hold additional references
3617 * to the frame
3618 */
3619 gst_aml_video_decoder_release_frame (decoder, frame);
3620 frame = NULL;
3621
3622 if (decoder->output_segment.rate < 0.0
3623 && !(decoder->output_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)) {
3624 GST_LOG_OBJECT (decoder, "queued frame");
3625 priv->output_queued = g_list_prepend (priv->output_queued, output_buffer);
3626 } else {
3627 ret = gst_aml_video_decoder_clip_and_push_buf (decoder, output_buffer);
3628 }
3629
3630done:
3631 if (frame)
3632 gst_aml_video_decoder_release_frame (decoder, frame);
3633 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3634 return ret;
3635}
3636
3637/**
3638 * gst_aml_video_decoder_finish_subframe:
3639 * @decoder: a #GstAmlVideoDecoder
3640 * @frame: (transfer full): the #GstAmlVideoCodecFrame
3641 *
3642 * Indicates that the subclass has finished decoding a subframe. This
3643 * method should be called for all subframes except the last subframe,
3644 * for which @gst_aml_video_decoder_finish_frame should be called
3645 * instead.
3646 *
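 * A minimal sketch for a slice-based subclass where each subframe carries
 * one slice; is_last_slice is a hypothetical flag the subclass derives
 * from its bitstream parsing:
 * |[<!-- language="C" -->
 * if (!is_last_slice)
 *   return gst_aml_video_decoder_finish_subframe (decoder, frame);
 * return gst_aml_video_decoder_finish_frame (decoder, frame);
 * ]|
 *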
3647 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3648 *
3649 * Since: 1.20
3650 */
3651GstFlowReturn
3652gst_aml_video_decoder_finish_subframe (GstAmlVideoDecoder * decoder,
3653 GstAmlVideoCodecFrame * frame)
3654{
3655 g_return_val_if_fail (gst_aml_video_decoder_get_subframe_mode (decoder),
3656 GST_FLOW_NOT_SUPPORTED);
3657
3658 GST_LOG_OBJECT (decoder, "finish subframe %p num=%d", frame->input_buffer,
3659 gst_aml_video_decoder_get_input_subframe_index (decoder, frame));
3660
3661 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3662 frame->abidata.ABI.subframes_processed++;
3663 gst_aml_video_codec_frame_unref (frame);
3664
3665 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3666
3667 return GST_FLOW_OK;
3668}
3669
3670/* With stream lock, takes the frame reference */
3671static GstFlowReturn
3672gst_aml_video_decoder_clip_and_push_buf (GstAmlVideoDecoder * decoder, GstBuffer * buf)
3673{
3674 GstFlowReturn ret = GST_FLOW_OK;
3675 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3676 guint64 start, stop;
3677 guint64 cstart, cstop;
3678 GstSegment *segment;
3679 GstClockTime duration;
3680
3681 /* Check for clipping */
3682 start = GST_BUFFER_PTS (buf);
3683 duration = GST_BUFFER_DURATION (buf);
3684
3685 /* store that we have valid decoded data */
3686 priv->had_output_data = TRUE;
3687
3688 stop = GST_CLOCK_TIME_NONE;
3689
3690 if (GST_CLOCK_TIME_IS_VALID (start) && GST_CLOCK_TIME_IS_VALID (duration)) {
3691 stop = start + duration;
3692 } else if (GST_CLOCK_TIME_IS_VALID (start)
3693 && !GST_CLOCK_TIME_IS_VALID (duration)) {
3694 /* If we don't clip away buffers that far before the segment we
3695 * can cause the pipeline to lockup. This can happen if audio is
3696 * properly clipped, and thus the audio sink does not preroll yet
3697 * but the video sink prerolls because we already outputted a
3698 * buffer here... and then queues run full.
3699 *
3700 * In the worst case we will clip one buffer too many here now if no
3701 * framerate is given, no buffer duration is given and the actual
3702 * framerate is lower than 25fps */
3703 stop = start + 40 * GST_MSECOND;
3704 }
3705
3706 segment = &decoder->output_segment;
3707 if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
3708    if (!GST_CLOCK_TIME_IS_VALID (start)) {
3709 GST_BUFFER_PTS (buf) = cstart;
3710 }
3711
3712 if (stop != GST_CLOCK_TIME_NONE && GST_CLOCK_TIME_IS_VALID (duration))
3713 GST_BUFFER_DURATION (buf) = cstop - cstart;
3714
3715 GST_LOG_OBJECT (decoder,
3716 "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
3717 GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3718 " time %" GST_TIME_FORMAT,
3719 GST_TIME_ARGS (cstart),
3720 GST_TIME_ARGS (cstop),
3721 GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
3722 GST_TIME_ARGS (segment->time));
3723 } else {
3724 GST_LOG_OBJECT (decoder,
3725 "dropping buffer outside segment: %" GST_TIME_FORMAT
3726 " %" GST_TIME_FORMAT
3727 " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3728 " time %" GST_TIME_FORMAT,
3729 GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
3730 GST_TIME_ARGS (segment->start),
3731 GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
3732 /* only check and return EOS if upstream still
3733 * in the same segment and interested as such */
3734 if (decoder->priv->in_out_segment_sync) {
3735 if (segment->rate >= 0) {
3736 if (GST_BUFFER_PTS (buf) >= segment->stop)
3737 ret = GST_FLOW_EOS;
3738 } else if (GST_BUFFER_PTS (buf) < segment->start) {
3739 ret = GST_FLOW_EOS;
3740 }
3741 }
3742
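    /* Amlogic extension: setting the environment variable
     * GST_AML_VIDEO_DECODER_DO_CLIP to 0 disables clipping here and pushes
     * out-of-segment buffers downstream anyway. */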
3743 const char *env = getenv("GST_AML_VIDEO_DECODER_DO_CLIP");
3744 if (env && 0 == atoi(env)) {
3745      GST_DEBUG_OBJECT (decoder, "not clipping out-of-bounds buffer, sending anyway");
3746 /* release STREAM_LOCK not to block upstream
3747 * while pushing buffer downstream */
3748 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3749 ret = gst_pad_push (decoder->srcpad, buf);
3750 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3751 }
3752 else
3753 {
3754 gst_buffer_unref (buf);
3755 }
3756 goto done;
3757 }
3758
3759 /* Is buffer too late (QoS) ? */
3760 if (priv->do_qos && GST_CLOCK_TIME_IS_VALID (priv->earliest_time)
3761 && GST_CLOCK_TIME_IS_VALID (cstart)) {
3762 GstClockTime deadline =
3763 gst_segment_to_running_time (segment, GST_FORMAT_TIME, cstart);
3764 if (GST_CLOCK_TIME_IS_VALID (deadline) && deadline < priv->earliest_time) {
3765 GST_WARNING_OBJECT (decoder,
3766 "Dropping frame due to QoS. start:%" GST_TIME_FORMAT " deadline:%"
3767 GST_TIME_FORMAT " earliest_time:%" GST_TIME_FORMAT,
3768 GST_TIME_ARGS (start), GST_TIME_ARGS (deadline),
3769 GST_TIME_ARGS (priv->earliest_time));
3770 gst_aml_video_decoder_post_qos_drop (decoder, cstart);
3771 gst_buffer_unref (buf);
3772 priv->discont = TRUE;
3773 goto done;
3774 }
3775 }
3776
3777 /* Set DISCONT flag here ! */
3778
3779 if (priv->discont) {
3780 GST_DEBUG_OBJECT (decoder, "Setting discont on output buffer");
3781 GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
3782 priv->discont = FALSE;
3783 }
3784
3785 /* update rate estimate */
3786 GST_OBJECT_LOCK (decoder);
3787 priv->bytes_out += gst_buffer_get_size (buf);
3788 if (GST_CLOCK_TIME_IS_VALID (duration)) {
3789 priv->time += duration;
3790 } else {
3791 /* FIXME : Use difference between current and previous outgoing
3792 * timestamp, and relate to difference between current and previous
3793 * bytes */
3794 /* better none than nothing valid */
3795 priv->time = GST_CLOCK_TIME_NONE;
3796 }
3797 GST_OBJECT_UNLOCK (decoder);
3798
3799 GST_DEBUG_OBJECT (decoder, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
3800 "PTS %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
3801 gst_buffer_get_size (buf),
3802 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
3803 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
3804
3805 /* we got data, so note things are looking up again, reduce
3806 * the error count, if there is one */
3807 if (G_UNLIKELY (priv->error_count))
3808 priv->error_count = 0;
3809
3810#ifndef GST_DISABLE_DEBUG
3811 if (G_UNLIKELY (priv->last_reset_time != GST_CLOCK_TIME_NONE)) {
3812 GstClockTime elapsed = gst_util_get_timestamp () - priv->last_reset_time;
3813
3814 /* First buffer since reset, report how long we took */
3815 GST_INFO_OBJECT (decoder, "First buffer since flush took %" GST_TIME_FORMAT
3816 " to produce", GST_TIME_ARGS (elapsed));
3817 priv->last_reset_time = GST_CLOCK_TIME_NONE;
3818 }
3819#endif
3820
3821 /* release STREAM_LOCK not to block upstream
3822 * while pushing buffer downstream */
3823 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3824 ret = gst_pad_push (decoder->srcpad, buf);
3825 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3826
3827done:
3828 return ret;
3829}
3830
3831/**
3832 * gst_aml_video_decoder_add_to_frame:
3833 * @decoder: a #GstAmlVideoDecoder
3834 * @n_bytes: the number of bytes to add
3835 *
3836 * Removes the next @n_bytes of input data and adds them to the currently parsed frame.
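 *
 * A hedged sketch of a @parse implementation that cuts fixed-size chunks
 * into frames; CHUNK_SIZE is a placeholder and @adapter is the adapter
 * handed to the @parse callback:
 * |[<!-- language="C" -->
 * if (gst_adapter_available (adapter) >= CHUNK_SIZE) {
 *   gst_aml_video_decoder_add_to_frame (decoder, CHUNK_SIZE);
 *   return gst_aml_video_decoder_have_frame (decoder);
 * }
 * return GST_FLOW_OK;
 * ]|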
3837 */
3838void
3839gst_aml_video_decoder_add_to_frame (GstAmlVideoDecoder * decoder, int n_bytes)
3840{
3841 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3842 GstBuffer *buf;
3843
3844 GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);
3845
3846 if (n_bytes == 0)
3847 return;
3848
3849 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3850 if (gst_adapter_available (priv->output_adapter) == 0) {
3851 priv->frame_offset =
3852 priv->input_offset - gst_adapter_available (priv->input_adapter);
3853 }
3854 buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);
3855
3856 gst_adapter_push (priv->output_adapter, buf);
3857 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3858}
3859
3860/**
3861 * gst_aml_video_decoder_get_pending_frame_size:
3862 * @decoder: a #GstAmlVideoDecoder
3863 *
3864 * Returns the number of bytes previously added to the current frame
3865 * by calling gst_aml_video_decoder_add_to_frame().
3866 *
3867 * Returns: The number of bytes pending for the current frame
3868 *
3869 * Since: 1.4
3870 */
3871gsize
3872gst_aml_video_decoder_get_pending_frame_size (GstAmlVideoDecoder * decoder)
3873{
3874 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3875 gsize ret;
3876
3877 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3878 ret = gst_adapter_available (priv->output_adapter);
3879 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3880
3881  GST_LOG_OBJECT (decoder, "Current pending frame has %" G_GSIZE_FORMAT " bytes",
3882 ret);
3883
3884 return ret;
3885}
3886
3887static guint64
3888gst_aml_video_decoder_get_frame_duration (GstAmlVideoDecoder * decoder,
3889 GstAmlVideoCodecFrame * frame)
3890{
3891 GstAmlVideoCodecState *state = decoder->priv->output_state;
3892
3893 /* it's possible that we don't have a state yet when we are dropping the
3894 * initial buffers */
3895 if (state == NULL)
3896 return GST_CLOCK_TIME_NONE;
3897
3898 if (state->info.fps_d == 0 || state->info.fps_n == 0) {
3899 return GST_CLOCK_TIME_NONE;
3900 }
3901
3902 /* FIXME: For interlaced frames this needs to take into account
3903 * the number of valid fields in the frame
3904 */
3905
3906 return gst_util_uint64_scale (GST_SECOND, state->info.fps_d,
3907 state->info.fps_n);
3908}
3909
3910/**
3911 * gst_aml_video_decoder_have_frame:
3912 * @decoder: a #GstAmlVideoDecoder
3913 *
3914 * Gathers all data collected for the currently parsed frame, attaches the
3915 * corresponding metadata and passes it along for further processing, i.e. @handle_frame.
3916 *
3917 * Returns: a #GstFlowReturn
3918 */
3919GstFlowReturn
3920gst_aml_video_decoder_have_frame (GstAmlVideoDecoder * decoder)
3921{
3922 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3923 GstBuffer *buffer;
3924 int n_available;
3925 GstClockTime pts, dts, duration;
3926 guint flags;
3927 GstFlowReturn ret = GST_FLOW_OK;
3928
3929 GST_LOG_OBJECT (decoder, "have_frame at offset %" G_GUINT64_FORMAT,
3930 priv->frame_offset);
3931
3932 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3933
3934 n_available = gst_adapter_available (priv->output_adapter);
3935 if (n_available) {
3936 buffer = gst_adapter_take_buffer (priv->output_adapter, n_available);
3937 } else {
3938 buffer = gst_buffer_new_and_alloc (0);
3939 }
3940
3941 if (priv->current_frame->input_buffer) {
3942 gst_aml_video_decoder_copy_metas (decoder, priv->current_frame,
3943 priv->current_frame->input_buffer, buffer);
3944 gst_buffer_unref (priv->current_frame->input_buffer);
3945 }
3946 priv->current_frame->input_buffer = buffer;
3947
3948 gst_aml_video_decoder_get_buffer_info_at_offset (decoder,
3949 priv->frame_offset, &pts, &dts, &duration, &flags);
3950
3951 GST_BUFFER_PTS (buffer) = pts;
3952 GST_BUFFER_DTS (buffer) = dts;
3953 GST_BUFFER_DURATION (buffer) = duration;
3954 GST_BUFFER_FLAGS (buffer) = flags;
3955
3956 GST_LOG_OBJECT (decoder, "collected frame size %d, "
3957 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
3958 GST_TIME_FORMAT, n_available, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
3959 GST_TIME_ARGS (duration));
3960
3961 if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
3962 GST_DEBUG_OBJECT (decoder, "Marking as sync point");
3963 GST_AML_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
3964 }
3965
3966 if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_CORRUPTED)) {
3967 GST_DEBUG_OBJECT (decoder, "Marking as corrupted");
3968 GST_AML_VIDEO_CODEC_FRAME_FLAG_SET (priv->current_frame,
3969 GST_AML_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
3970 }
3971
3972 /* In reverse playback, just capture and queue frames for later processing */
3973 if (decoder->input_segment.rate < 0.0) {
3974 priv->parse_gather =
3975 g_list_prepend (priv->parse_gather, priv->current_frame);
3976 priv->current_frame = NULL;
3977 } else {
3978 GstAmlVideoCodecFrame *frame = priv->current_frame;
3979 frame->abidata.ABI.num_subframes++;
3980 /* In subframe mode, we keep a ref for ourselves
3981 * as this frame will be kept during the data collection
3982 * in parsed mode. The frame reference will be released by
3983 * finish_(sub)frame or drop_(sub)frame.*/
3984 if (gst_aml_video_decoder_get_subframe_mode (decoder))
3985 gst_aml_video_codec_frame_ref (priv->current_frame);
3986 else
3987 priv->current_frame = NULL;
3988
3989 /* Decode the frame, which gives away our ref */
3990 ret = gst_aml_video_decoder_decode_frame (decoder, frame);
3991 }
3992
3993 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3994
3995 return ret;
3996}
3997
3998/* Pass the frame in priv->current_frame through the
3999 * handle_frame() callback for decoding and passing to gvd_finish_frame(),
4000 * or dropping by passing to gvd_drop_frame() */
4001static GstFlowReturn
4002gst_aml_video_decoder_decode_frame (GstAmlVideoDecoder * decoder,
4003 GstAmlVideoCodecFrame * frame)
4004{
4005 GstAmlVideoDecoderPrivate *priv = decoder->priv;
4006 GstAmlVideoDecoderClass *decoder_class;
4007 GstFlowReturn ret = GST_FLOW_OK;
4008
4009 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
4010
4011 /* FIXME : This should only have to be checked once (either the subclass has an
4012 * implementation, or it doesn't) */
4013 g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);
4014 g_return_val_if_fail (frame != NULL, GST_FLOW_ERROR);
4015
4016 frame->pts = GST_BUFFER_PTS (frame->input_buffer);
4017 frame->dts = GST_BUFFER_DTS (frame->input_buffer);
4018 frame->duration = GST_BUFFER_DURATION (frame->input_buffer);
4019 frame->deadline =
4020 gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
4021 frame->pts);
4022
4023 /* For keyframes, PTS = DTS + constant_offset, usually 0 to 3 frame
4024 * durations. */
4025 /* FIXME upstream can be quite wrong about the keyframe aspect,
4026 * so we could be going off here as well,
4027 * maybe let subclass decide if it really is/was a keyframe */
4028 if (GST_AML_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
4029 priv->distance_from_sync = 0;
4030
4031 GST_OBJECT_LOCK (decoder);
4032 priv->request_sync_point_flags &=
4033 ~GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT;
4034 if (priv->request_sync_point_frame_number == REQUEST_SYNC_POINT_PENDING)
4035 priv->request_sync_point_frame_number = frame->system_frame_number;
4036 GST_OBJECT_UNLOCK (decoder);
4037
4038 if (GST_CLOCK_TIME_IS_VALID (frame->pts)
4039 && GST_CLOCK_TIME_IS_VALID (frame->dts)) {
4040 /* just in case they are not equal as might ideally be,
4041 * e.g. quicktime has a (positive) delta approach */
4042 priv->pts_delta = frame->pts - frame->dts;
4043 GST_DEBUG_OBJECT (decoder, "PTS delta %d ms",
4044 (gint) (priv->pts_delta / GST_MSECOND));
4045 }
4046 } else {
4047 if (priv->distance_from_sync == -1 && priv->automatic_request_sync_points) {
4048 GST_DEBUG_OBJECT (decoder,
4049 "Didn't receive a keyframe yet, requesting sync point");
4050 gst_aml_video_decoder_request_sync_point (decoder, frame,
4051 priv->automatic_request_sync_point_flags);
4052 }
4053
4054 GST_OBJECT_LOCK (decoder);
4055 if ((priv->needs_sync_point && priv->distance_from_sync == -1)
4056 || (priv->request_sync_point_flags &
4057 GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT)) {
4058 GST_WARNING_OBJECT (decoder,
4059 "Subclass requires a sync point but we didn't receive one yet, discarding input");
4060 GST_OBJECT_UNLOCK (decoder);
4061 if (priv->automatic_request_sync_points) {
4062 gst_aml_video_decoder_request_sync_point (decoder, frame,
4063 priv->automatic_request_sync_point_flags);
4064 }
4065 gst_aml_video_decoder_release_frame (decoder, frame);
4066 return GST_FLOW_OK;
4067 }
4068 GST_OBJECT_UNLOCK (decoder);
4069
4070 priv->distance_from_sync++;
4071 }
4072
4073 frame->distance_from_sync = priv->distance_from_sync;
4074
4075 if (frame->abidata.ABI.num_subframes == 1) {
4076 frame->abidata.ABI.ts = frame->dts;
4077 frame->abidata.ABI.ts2 = frame->pts;
4078 }
4079
4080 GST_LOG_OBJECT (decoder,
4081 "frame %p PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dist %d",
4082 frame, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
4083 frame->distance_from_sync);
4084 /* FIXME: suboptimal way to add a unique frame to the list, in case of subframe mode. */
4085 if (!g_queue_find (&priv->frames, frame)) {
4086 g_queue_push_tail (&priv->frames, gst_aml_video_codec_frame_ref (frame));
4087 } else {
4088 GST_LOG_OBJECT (decoder,
4089 "Do not add an existing frame used to decode subframes");
4090 }
4091
4092 if (priv->frames.length > 10) {
4093    GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames, "
4094 "possible internal leaking?", priv->frames.length);
4095 }
4096
4097 /* do something with frame */
4098 ret = decoder_class->handle_frame (decoder, frame);
4099 if (ret != GST_FLOW_OK)
4100 GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));
4101
4102 /* the frame has either been added to parse_gather or sent to
4103 handle frame so there is no need to unref it */
4104 return ret;
4105}
4106
4107
4108/**
4109 * gst_aml_video_decoder_get_output_state:
4110 * @decoder: a #GstAmlVideoDecoder
4111 *
4112 * Get the #GstAmlVideoCodecState currently describing the output stream.
4113 *
4114 * Returns: (transfer full): #GstAmlVideoCodecState describing format of video data.
4115 */
4116GstAmlVideoCodecState *
4117gst_aml_video_decoder_get_output_state (GstAmlVideoDecoder * decoder)
4118{
4119 GstAmlVideoCodecState *state = NULL;
4120
4121 GST_OBJECT_LOCK (decoder);
4122 if (decoder->priv->output_state)
4123 state = gst_aml_video_codec_state_ref (decoder->priv->output_state);
4124 GST_OBJECT_UNLOCK (decoder);
4125
4126 return state;
4127}
4128
4129static GstAmlVideoCodecState *
4130_set_interlaced_output_state (GstAmlVideoDecoder * decoder,
4131 GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
4132 guint height, GstAmlVideoCodecState * reference, gboolean copy_interlace_mode)
4133{
4134 GstAmlVideoDecoderPrivate *priv = decoder->priv;
4135 GstAmlVideoCodecState *state;
4136
4137 g_assert ((copy_interlace_mode
4138 && interlace_mode == GST_VIDEO_INTERLACE_MODE_PROGRESSIVE)
4139 || !copy_interlace_mode);
4140
4141 GST_DEBUG_OBJECT (decoder,
4142 "fmt:%d, width:%d, height:%d, interlace-mode: %s, reference:%p", fmt,
4143 width, height, gst_video_interlace_mode_to_string (interlace_mode),
4144 reference);
4145
4146 /* Create the new output state */
4147 state =
4148 _new_output_state (fmt, interlace_mode, width, height, reference,
4149 copy_interlace_mode);
4150 if (!state)
4151 return NULL;
4152
4153 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4154
4155 GST_OBJECT_LOCK (decoder);
4156 /* Replace existing output state by new one */
4157 if (priv->output_state)
4158 gst_aml_video_codec_state_unref (priv->output_state);
4159 priv->output_state = gst_aml_video_codec_state_ref (state);
4160
4161 if (priv->output_state != NULL && priv->output_state->info.fps_n > 0) {
4162 priv->qos_frame_duration =
4163 gst_util_uint64_scale (GST_SECOND, priv->output_state->info.fps_d,
4164 priv->output_state->info.fps_n);
4165 } else {
4166 priv->qos_frame_duration = 0;
4167 }
4168 priv->output_state_changed = TRUE;
4169 GST_OBJECT_UNLOCK (decoder);
4170
4171 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4172
4173 return state;
4174}
4175
4176/**
4177 * gst_aml_video_decoder_set_output_state:
4178 * @decoder: a #GstAmlVideoDecoder
4179 * @fmt: a #GstVideoFormat
4180 * @width: The width in pixels
4181 * @height: The height in pixels
4182 * @reference: (allow-none) (transfer none): An optional reference #GstAmlVideoCodecState
4183 *
4184 * Creates a new #GstAmlVideoCodecState with the specified @fmt, @width and @height
4185 * as the output state for the decoder.
4186 * Any previously set output state on @decoder will be replaced by the newly
4187 * created one.
4188 *
4189 * If the subclass wishes to copy over existing fields (like pixel aspect ratio,
4190 * or framerate) from an existing #GstAmlVideoCodecState, it can be provided as a
4191 * @reference.
4192 *
4193 * If the subclass wishes to override some fields from the output state (like
4194 * pixel-aspect-ratio or framerate) it can do so on the returned #GstAmlVideoCodecState.
4195 *
4196 * The new output state will only take effect (set on pads and buffers) starting
4197 * from the next call to #gst_aml_video_decoder_finish_frame().
4198 *
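 * A minimal sketch, e.g. from a subclass's @set_format implementation;
 * the I420/320x240 values are placeholders:
 * |[<!-- language="C" -->
 * GstAmlVideoCodecState *state;
 *
 * state = gst_aml_video_decoder_set_output_state (decoder,
 *     GST_VIDEO_FORMAT_I420, 320, 240, input_state);
 * gst_aml_video_codec_state_unref (state);
 * ]|
 *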
4199 * Returns: (transfer full): the newly configured output state.
4200 */
4201GstAmlVideoCodecState *
4202gst_aml_video_decoder_set_output_state (GstAmlVideoDecoder * decoder,
4203 GstVideoFormat fmt, guint width, guint height,
4204 GstAmlVideoCodecState * reference)
4205{
4206 return _set_interlaced_output_state (decoder, fmt,
4207 GST_VIDEO_INTERLACE_MODE_PROGRESSIVE, width, height, reference, TRUE);
4208}
4209
4210/**
4211 * gst_aml_video_decoder_set_interlaced_output_state:
4212 * @decoder: a #GstAmlVideoDecoder
4213 * @fmt: a #GstVideoFormat
4214 * @width: The width in pixels
4215 * @height: The height in pixels
4216 * @interlace_mode: A #GstVideoInterlaceMode
4217 * @reference: (allow-none) (transfer none): An optional reference #GstAmlVideoCodecState
4218 *
4219 * Same as #gst_aml_video_decoder_set_output_state() but additionally allows setting
4220 * the interlacing mode.
4221 *
4222 * Returns: (transfer full): the newly configured output state.
4223 *
4224 * Since: 1.16.
4225 */
4226GstAmlVideoCodecState *
4227gst_aml_video_decoder_set_interlaced_output_state (GstAmlVideoDecoder * decoder,
4228 GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
4229 guint height, GstAmlVideoCodecState * reference)
4230{
4231 return _set_interlaced_output_state (decoder, fmt, interlace_mode, width,
4232 height, reference, FALSE);
4233}
4234
4235
4236/**
4237 * gst_aml_video_decoder_get_oldest_frame:
4238 * @decoder: a #GstAmlVideoDecoder
4239 *
4240 * Get the oldest pending unfinished #GstAmlVideoCodecFrame
4241 *
4242 * Returns: (transfer full): oldest pending unfinished #GstAmlVideoCodecFrame.
4243 */
4244GstAmlVideoCodecFrame *
4245gst_aml_video_decoder_get_oldest_frame (GstAmlVideoDecoder * decoder)
4246{
4247 GstAmlVideoCodecFrame *frame = NULL;
4248
4249 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4250 if (decoder->priv->frames.head)
4251 frame = gst_aml_video_codec_frame_ref (decoder->priv->frames.head->data);
4252 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4253
4254 return (GstAmlVideoCodecFrame *) frame;
4255}
4256
4257/**
4258 * gst_aml_video_decoder_get_frame:
4259 * @decoder: a #GstAmlVideoDecoder
4260 * @frame_number: system_frame_number of a frame
4261 *
4262 * Get a pending unfinished #GstAmlVideoCodecFrame
4263 *
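 * A minimal sketch; picture_id is a hypothetical handle the subclass
 * stored from @frame's system_frame_number when submitting the frame to
 * its hardware decoder:
 * |[<!-- language="C" -->
 * frame = gst_aml_video_decoder_get_frame (decoder, picture_id);
 * ]|
 *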
4264 * Returns: (transfer full): pending unfinished #GstAmlVideoCodecFrame identified by @frame_number.
4265 */
4266GstAmlVideoCodecFrame *
4267gst_aml_video_decoder_get_frame (GstAmlVideoDecoder * decoder, int frame_number)
4268{
4269 GList *g;
4270 GstAmlVideoCodecFrame *frame = NULL;
4271
4272 GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);
4273
4274 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4275 for (g = decoder->priv->frames.head; g; g = g->next) {
4276 GstAmlVideoCodecFrame *tmp = g->data;
4277
4278 if (tmp->system_frame_number == frame_number) {
4279 frame = gst_aml_video_codec_frame_ref (tmp);
4280 break;
4281 }
4282 }
4283 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4284
4285 return frame;
4286}
4287
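/* Amlogic extension (no upstream equivalent): creates a new codec frame and
 * queues it on the pending-frames list directly, presumably for frames that
 * originate from the V4L2 decoder rather than from the sink pad chain. */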
4288GstAmlVideoCodecFrame *
4289gst_aml_video_decoder_v4l2_new_frame (GstAmlVideoDecoder * decoder)
4290{
4291 GstAmlVideoCodecFrame *frame = NULL;
4292 frame = gst_aml_video_decoder_new_frame (decoder);
4293 g_queue_push_tail (&decoder->priv->frames, gst_aml_video_codec_frame_ref (frame));
4294
4295 return frame;
4296}
4297
4298/**
4299 * gst_aml_video_decoder_get_frames:
4300 * @decoder: a #GstAmlVideoDecoder
4301 *
4302 * Get all pending unfinished #GstAmlVideoCodecFrame
4303 *
4304 * Returns: (transfer full) (element-type GstAmlVideoCodecFrame): pending unfinished #GstAmlVideoCodecFrame.
4305 */
4306GList *
4307gst_aml_video_decoder_get_frames (GstAmlVideoDecoder * decoder)
4308{
4309 GList *frames;
4310
4311 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4312 frames =
4313 g_list_copy_deep (decoder->priv->frames.head,
4314 (GCopyFunc) gst_aml_video_codec_frame_ref, NULL);
4315 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4316
4317 return frames;
4318}
4319
4320static gboolean
4321gst_aml_video_decoder_decide_allocation_default (GstAmlVideoDecoder * decoder,
4322 GstQuery * query)
4323{
4324 GstCaps *outcaps = NULL;
4325 GstBufferPool *pool = NULL;
4326 guint size, min, max;
4327 GstAllocator *allocator = NULL;
4328 GstAllocationParams params;
4329 GstStructure *config;
4330 gboolean update_pool, update_allocator;
4331 GstVideoInfo vinfo;
4332
4333 gst_query_parse_allocation (query, &outcaps, NULL);
4334 gst_video_info_init (&vinfo);
4335 if (outcaps)
4336 gst_video_info_from_caps (&vinfo, outcaps);
4337
4338 /* we got configuration from our peer or the decide_allocation method,
4339 * parse them */
4340 if (gst_query_get_n_allocation_params (query) > 0) {
4341 /* try the allocator */
4342 gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
4343 update_allocator = TRUE;
4344 } else {
4345 allocator = NULL;
4346 gst_allocation_params_init (&params);
4347 update_allocator = FALSE;
4348 }
4349
4350 if (gst_query_get_n_allocation_pools (query) > 0) {
4351 gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
4352 size = MAX (size, vinfo.size);
4353 update_pool = TRUE;
4354 } else {
4355 pool = NULL;
4356 size = vinfo.size;
4357 min = max = 0;
4358
4359 update_pool = FALSE;
4360 }
4361
4362 if (pool == NULL) {
4363 /* no pool, we can make our own */
4364 GST_DEBUG_OBJECT (decoder, "no pool, making new pool");
4365 pool = gst_video_buffer_pool_new ();
4366 }
4367
4368 /* now configure */
4369 config = gst_buffer_pool_get_config (pool);
4370 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
4371 gst_buffer_pool_config_set_allocator (config, allocator, &params);
4372
4373 GST_DEBUG_OBJECT (decoder,
4374 "setting config %" GST_PTR_FORMAT " in pool %" GST_PTR_FORMAT, config,
4375 pool);
4376 if (!gst_buffer_pool_set_config (pool, config)) {
4377 config = gst_buffer_pool_get_config (pool);
4378
4379 /* If change are not acceptable, fallback to generic pool */
4380 if (!gst_buffer_pool_config_validate_params (config, outcaps, size, min,
4381 max)) {
4382 GST_DEBUG_OBJECT (decoder, "unsupported pool, making new pool");
4383
4384 gst_object_unref (pool);
4385 pool = gst_video_buffer_pool_new ();
4386 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
4387 gst_buffer_pool_config_set_allocator (config, allocator, &params);
4388 }
4389
4390 if (!gst_buffer_pool_set_config (pool, config))
4391 goto config_failed;
4392 }
4393
4394 if (update_allocator)
4395 gst_query_set_nth_allocation_param (query, 0, allocator, &params);
4396 else
4397 gst_query_add_allocation_param (query, allocator, &params);
4398 if (allocator)
4399 gst_object_unref (allocator);
4400
4401 if (update_pool)
4402 gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
4403 else
4404 gst_query_add_allocation_pool (query, pool, size, min, max);
4405
4406 if (pool)
4407 gst_object_unref (pool);
4408
4409 return TRUE;
4410
4411config_failed:
4412 if (allocator)
4413 gst_object_unref (allocator);
4414 if (pool)
4415 gst_object_unref (pool);
4416 GST_ELEMENT_ERROR (decoder, RESOURCE, SETTINGS,
4417 ("Failed to configure the buffer pool"),
4418 ("Configuration is most likely invalid, please report this issue."));
4419 return FALSE;
4420}
4421
4422static gboolean
4423gst_aml_video_decoder_propose_allocation_default (GstAmlVideoDecoder * decoder,
4424 GstQuery * query)
4425{
4426 return TRUE;
4427}
4428
4429static gboolean
4430gst_aml_video_decoder_negotiate_pool (GstAmlVideoDecoder * decoder, GstCaps * caps)
4431{
4432 GstAmlVideoDecoderClass *klass;
4433 GstQuery *query = NULL;
4434 GstBufferPool *pool = NULL;
4435 GstAllocator *allocator;
4436 GstAllocationParams params;
4437 gboolean ret = TRUE;
4438
4439 klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
4440
4441
4442  query = gst_query_new_allocation (caps, TRUE);
4443
4444  if (decoder->priv->local_buf_pool)
4445  {
4446    GST_DEBUG_OBJECT (decoder, "mock query ALLOCATION");
4447
4448    GstBufferPool *drm_pool = NULL;
4449 GstVideoInfo video_info;
4450
4451 //TODO:need parse is secure scene
4452
4453 memset(&video_info, 0, sizeof(video_info));
4454 if (G_UNLIKELY (!gst_video_info_from_caps(&video_info, caps)))
4455 {
4456 GST_ERROR_OBJECT(decoder, "can't get video info from caps");
4457 ret = FALSE;
4458 goto no_decide_allocation;
4459 }
4460
4461 drm_pool = gst_drm_bufferpool_new(decoder->svp, GST_DRM_BUFFERPOOL_TYPE_VIDEO_PLANE);
4462 if (G_UNLIKELY (!drm_pool))
4463 {
4464 GST_ERROR_OBJECT(decoder, "can't alloc drmbufferpool");
4465 ret = FALSE;
4466 goto no_decide_allocation;
4467 }
4468
4469 gst_query_add_allocation_pool(query, drm_pool, video_info.size, 1, 1);
4470 g_object_unref(drm_pool);
4471
4472 gst_query_add_allocation_meta(query, GST_VIDEO_META_API_TYPE, NULL);
4473 }
4474 else
4475 {
4476 GST_DEBUG_OBJECT (decoder, "do query ALLOCATION");
4477
4478 if (!gst_pad_peer_query (decoder->srcpad, query))
4479 {
4480 GST_DEBUG_OBJECT (decoder, "didn't get downstream ALLOCATION hints");
4481 }
4482  }
4483
4484 g_assert (klass->decide_allocation != NULL);
4485 ret = klass->decide_allocation (decoder, query);
4486  if (!ret)
4487    goto no_decide_allocation;
4488
4489  GST_DEBUG_OBJECT (decoder, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, ret, query);
4490
4491 /* we got configuration from our peer or the decide_allocation method, parse them */
4492 if (gst_query_get_n_allocation_params (query) > 0)
4493 {
4494 gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
4495  }
4496  else
4497 {
4498 allocator = NULL;
4499 gst_allocation_params_init (&params);
4500 }
4501  if (gst_query_get_n_allocation_pools (query) > 0)
4502    gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
4503
4504  if (!pool) {
4505 if (allocator)
4506 gst_object_unref (allocator);
4507 ret = FALSE;
4508 goto no_decide_allocation;
4509 }
4510
4511 if (decoder->priv->allocator)
4512 gst_object_unref (decoder->priv->allocator);
4513 decoder->priv->allocator = allocator;
4514 decoder->priv->params = params;
4515
4516 if (decoder->priv->pool) {
4517 /* do not set the bufferpool to inactive here, it will be done
4518 * on its finalize function. As videodecoder do late renegotiation
4519 * it might happen that some element downstream is already using this
4520 * same bufferpool and deactivating it will make it fail.
4521 * Happens when a downstream element changes from passthrough to
4522 * non-passthrough and gets this same bufferpool to use */
4523 GST_DEBUG_OBJECT (decoder, "unref pool %" GST_PTR_FORMAT,
4524 decoder->priv->pool);
4525 gst_object_unref (decoder->priv->pool);
4526 }
4527 decoder->priv->pool = pool;
4528
4529 /* and activate */
4530 GST_DEBUG_OBJECT (decoder, "activate pool %" GST_PTR_FORMAT, pool);
4531 gst_buffer_pool_set_active (pool, TRUE);
4532
4533done:
4534 if (query)
4535 gst_query_unref (query);
4536
4537 return ret;
4538
4539 /* Errors */
4540no_decide_allocation:
4541 {
4542 GST_WARNING_OBJECT (decoder, "Subclass failed to decide allocation");
4543 goto done;
4544 }
4545}
4546
4547static gboolean
4548gst_aml_video_decoder_negotiate_default (GstAmlVideoDecoder * decoder)
4549{
4550 GstAmlVideoCodecState *state = decoder->priv->output_state;
4551 gboolean ret = TRUE;
4552 GstAmlVideoCodecFrame *frame;
4553 GstCaps *prevcaps;
4554 GstCaps *incaps;
4555
4556 if (!state) {
4557 GST_DEBUG_OBJECT (decoder,
4558        "Trying to negotiate the pool without setting the output format");
4559 ret = gst_aml_video_decoder_negotiate_pool (decoder, NULL);
4560 goto done;
4561 }
4562
4563 g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
4564 g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);
4565
4566 /* If the base class didn't set any multiview params, assume mono
4567 * now */
4568 if (GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) ==
4569 GST_VIDEO_MULTIVIEW_MODE_NONE) {
4570 GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) =
4571 GST_VIDEO_MULTIVIEW_MODE_MONO;
4572 GST_VIDEO_INFO_MULTIVIEW_FLAGS (&state->info) =
4573 GST_VIDEO_MULTIVIEW_FLAGS_NONE;
4574 }
4575
4576 GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
4577 state->info.par_n, state->info.par_d,
4578 state->info.fps_n, state->info.fps_d);
4579
4580 if (state->caps == NULL)
4581 state->caps = gst_video_info_to_caps (&state->info);
4582
4583 incaps = gst_pad_get_current_caps (GST_AML_VIDEO_DECODER_SINK_PAD (decoder));
4584 if (incaps) {
4585 GstStructure *in_struct;
4586
4587 in_struct = gst_caps_get_structure (incaps, 0);
4588 if (gst_structure_has_field (in_struct, "mastering-display-info") ||
4589 gst_structure_has_field (in_struct, "content-light-level")) {
4590 const gchar *s;
4591
4592 /* prefer upstream information */
4593 state->caps = gst_caps_make_writable (state->caps);
4594 if ((s = gst_structure_get_string (in_struct, "mastering-display-info"))) {
4595 gst_caps_set_simple (state->caps,
4596 "mastering-display-info", G_TYPE_STRING, s, NULL);
4597 }
4598
4599 if ((s = gst_structure_get_string (in_struct, "content-light-level"))) {
4600 gst_caps_set_simple (state->caps,
4601 "content-light-level", G_TYPE_STRING, s, NULL);
4602 }
4603 }
4604
4605 gst_caps_unref (incaps);
4606 }
4607
4608 if (state->allocation_caps == NULL)
4609 state->allocation_caps = gst_caps_ref (state->caps);
4610
4611 GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);
4612
4613 /* Push all pending pre-caps events of the oldest frame before
4614 * setting caps */
4615 frame = decoder->priv->frames.head ? decoder->priv->frames.head->data : NULL;
4616 if (frame || decoder->priv->current_frame_events) {
4617 GList **events, *l;
4618
4619 if (frame) {
4620 events = &frame->events;
4621 } else {
4622 events = &decoder->priv->current_frame_events;
4623 }
4624
4625 for (l = g_list_last (*events); l;) {
4626 GstEvent *event = GST_EVENT (l->data);
4627 GList *tmp;
4628
4629 if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
4630 gst_aml_video_decoder_push_event (decoder, event);
4631 tmp = l;
4632 l = l->prev;
4633 *events = g_list_delete_link (*events, tmp);
4634 } else {
4635 l = l->prev;
4636 }
4637 }
4638 }
4639
4640 prevcaps = gst_pad_get_current_caps (decoder->srcpad);
4641 if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps)) {
4642 if (!prevcaps) {
4643 GST_DEBUG_OBJECT (decoder, "decoder src pad has currently NULL caps");
4644 }
4645 ret = gst_pad_set_caps (decoder->srcpad, state->caps);
4646 } else {
4647 ret = TRUE;
4648 GST_DEBUG_OBJECT (decoder,
4649 "current src pad and output state caps are the same");
4650 }
4651 if (prevcaps)
4652 gst_caps_unref (prevcaps);
4653
4654 if (!ret)
4655 goto done;
4656 decoder->priv->output_state_changed = FALSE;
4657 /* Negotiate pool */
4658 ret = gst_aml_video_decoder_negotiate_pool (decoder, state->allocation_caps);
4659
4660done:
4661 return ret;
4662}
4663
4664static gboolean
4665gst_aml_video_decoder_negotiate_unlocked (GstAmlVideoDecoder * decoder)
4666{
4667 GstAmlVideoDecoderClass *klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
4668 gboolean ret = TRUE;
4669
4670 if (G_LIKELY (klass->negotiate))
4671 ret = klass->negotiate (decoder);
4672
4673 return ret;
4674}
4675
4676/**
4677 * gst_aml_video_decoder_negotiate:
4678 * @decoder: a #GstAmlVideoDecoder
4679 *
4680 * Negotiates the currently configured #GstAmlVideoCodecState with downstream
4681 * elements. Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case, but marks it
4682 * again if negotiation fails.
4683 *
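 * A typical use from a subclass, right after (re)configuring the output
 * state:
 * |[<!-- language="C" -->
 * if (!gst_aml_video_decoder_negotiate (decoder))
 *   return GST_FLOW_NOT_NEGOTIATED;
 * ]|
 *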
4684 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
4685 */
4686gboolean
4687gst_aml_video_decoder_negotiate (GstAmlVideoDecoder * decoder)
4688{
4689 GstAmlVideoDecoderClass *klass;
4690 gboolean ret = TRUE;
4691
4692 g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (decoder), FALSE);
4693
4694 klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
4695
4696 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4697 gst_pad_check_reconfigure (decoder->srcpad);
4698 if (klass->negotiate) {
4699 ret = klass->negotiate (decoder);
4700 if (!ret)
4701 gst_pad_mark_reconfigure (decoder->srcpad);
4702 }
4703 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4704
4705 return ret;
4706}
4707
4708/**
4709 * gst_aml_video_decoder_allocate_output_buffer:
4710 * @decoder: a #GstAmlVideoDecoder
4711 *
4712 * Helper function that allocates a buffer to hold a video frame for @decoder's
4713 * current #GstAmlVideoCodecState.
4714 *
4715 * You should use gst_aml_video_decoder_allocate_output_frame() instead of this
4716 * function, if possible at all.
4717 *
4718 * Returns: (transfer full): allocated buffer, or NULL if no buffer could be
4719 * allocated (e.g. when downstream is flushing or shutting down)
4720 */
4721GstBuffer *
4722gst_aml_video_decoder_allocate_output_buffer (GstAmlVideoDecoder * decoder)
4723{
4724 GstFlowReturn flow;
4725 GstBuffer *buffer = NULL;
4726 gboolean needs_reconfigure = FALSE;
4727
4728 GST_DEBUG ("alloc src buffer");
4729
4730 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4731 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
4732 if (G_UNLIKELY (!decoder->priv->output_state
4733 || decoder->priv->output_state_changed || needs_reconfigure)) {
4734 if (!gst_aml_video_decoder_negotiate_unlocked (decoder)) {
4735 if (decoder->priv->output_state) {
4736 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
4737 gst_pad_mark_reconfigure (decoder->srcpad);
4738 goto fallback;
4739 } else {
4740 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, output_buffer=NULL");
4741 goto failed_allocation;
4742 }
4743 }
4744 }
4745
4746 flow = gst_buffer_pool_acquire_buffer (decoder->priv->pool, &buffer, NULL);
4747
4748 if (flow != GST_FLOW_OK) {
4749 GST_INFO_OBJECT (decoder, "couldn't allocate output buffer, flow %s",
4750 gst_flow_get_name (flow));
4751 if (decoder->priv->output_state && decoder->priv->output_state->info.size)
4752 goto fallback;
4753 else
4754 goto failed_allocation;
4755 }
4756 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4757
4758 return buffer;
4759
4760fallback:
4761 GST_INFO_OBJECT (decoder,
4762      "Fallback allocation, creating a new buffer which doesn't belong to any buffer pool");
4763 buffer =
4764 gst_buffer_new_allocate (NULL, decoder->priv->output_state->info.size,
4765 NULL);
4766
4767failed_allocation:
4768 GST_ERROR_OBJECT (decoder, "Failed to allocate the buffer..");
4769 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4770
4771 return buffer;
4772}
4773
4774/**
4775 * gst_aml_video_decoder_allocate_output_frame:
4776 * @decoder: a #GstAmlVideoDecoder
4777 * @frame: a #GstAmlVideoCodecFrame
4778 *
4779 * Helper function that allocates a buffer to hold a video frame for @decoder's
4780 * current #GstAmlVideoCodecState. Subclass should already have configured video
4781 * state and set src pad caps.
4782 *
4783 * The buffer allocated here is owned by the frame and you should only
4784 * keep references to the frame, not the buffer.
4785 *
4786 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4787 */
4788GstFlowReturn
4789gst_aml_video_decoder_allocate_output_frame (GstAmlVideoDecoder *
4790 decoder, GstAmlVideoCodecFrame * frame)
4791{
4792 return gst_aml_video_decoder_allocate_output_frame_with_params (decoder, frame,
4793 NULL);
4794}
4795
4796/**
4797 * gst_aml_video_decoder_allocate_output_frame_with_params:
4798 * @decoder: a #GstAmlVideoDecoder
4799 * @frame: a #GstAmlVideoCodecFrame
4800 * @params: a #GstBufferPoolAcquireParams
4801 *
4802 * Same as #gst_aml_video_decoder_allocate_output_frame except it allows passing
4803 * #GstBufferPoolAcquireParams to the sub call gst_buffer_pool_acquire_buffer.
4804 *
4805 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4806 *
4807 * Since: 1.12
4808 */
4809GstFlowReturn
4810gst_aml_video_decoder_allocate_output_frame_with_params (GstAmlVideoDecoder *
4811 decoder, GstAmlVideoCodecFrame * frame, GstBufferPoolAcquireParams * params)
4812{
4813 GstFlowReturn flow_ret;
4814 GstAmlVideoCodecState *state;
4815 int num_bytes;
4816 gboolean needs_reconfigure = FALSE;
4817
4818 g_return_val_if_fail (decoder->priv->output_state, GST_FLOW_NOT_NEGOTIATED);
4819 g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);
4820
4821 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4822
4823 state = decoder->priv->output_state;
4824 if (state == NULL) {
4825 g_warning ("Output state should be set before allocating frame");
4826 goto error;
4827 }
4828 num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
4829 if (num_bytes == 0) {
4830 g_warning ("Frame size should not be 0");
4831 goto error;
4832 }
4833
4834 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
4835 if (G_UNLIKELY (decoder->priv->output_state_changed || needs_reconfigure)) {
4836 if (!gst_aml_video_decoder_negotiate_unlocked (decoder)) {
4837 gst_pad_mark_reconfigure (decoder->srcpad);
4838 if (GST_PAD_IS_FLUSHING (decoder->srcpad)) {
4839 GST_DEBUG_OBJECT (decoder,
4840 "Failed to negotiate a pool: pad is flushing");
4841 goto flushing;
4842 } else if (!decoder->priv->pool || decoder->priv->output_state_changed) {
4843 GST_DEBUG_OBJECT (decoder,
4844 "Failed to negotiate a pool and no previous pool to reuse");
4845 goto error;
4846 } else {
4847 GST_DEBUG_OBJECT (decoder,
4848 "Failed to negotiate a pool, falling back to the previous pool");
4849 }
4850 }
4851 }
4852
4853 GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);
4854
4855 flow_ret = gst_buffer_pool_acquire_buffer (decoder->priv->pool,
4856 &frame->output_buffer, params);
4857
4858 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4859
4860 return flow_ret;
4861
4862flushing:
4863 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4864 return GST_FLOW_FLUSHING;
4865
4866error:
4867 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4868 return GST_FLOW_ERROR;
4869}
4870
4871/**
4872 * gst_aml_video_decoder_get_max_decode_time:
4873 * @decoder: a #GstAmlVideoDecoder
4874 * @frame: a #GstAmlVideoCodecFrame
4875 *
4876 * Determines maximum possible decoding time for @frame that will
4877 * allow it to decode and arrive in time (as determined by QoS events).
4878 * In particular, a negative result means decoding in time is no longer possible
4879 * and the frame should therefore be decoded (or dropped) as quickly as possible.
4880 *
4881 * Returns: max decoding time.
4882 */
GstClockTimeDiff
gst_aml_video_decoder_get_max_decode_time (GstAmlVideoDecoder *
    decoder, GstAmlVideoCodecFrame * frame)
{
  GstClockTimeDiff deadline;
  GstClockTime earliest_time;

  GST_OBJECT_LOCK (decoder);
  earliest_time = decoder->priv->earliest_time;
  if (GST_CLOCK_TIME_IS_VALID (earliest_time)
      && GST_CLOCK_TIME_IS_VALID (frame->deadline))
    deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
  else
    deadline = G_MAXINT64;

  GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
      ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_STIME_FORMAT,
      GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
      GST_STIME_ARGS (deadline));

  GST_OBJECT_UNLOCK (decoder);

  return deadline;
}
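
/* QoS sketch: a subclass can skip late frames based on the remaining
 * decode time. How a dropped frame is released is left out here; an
 * actual subclass would use whatever drop path the base class offers.
 *
 * |[<!-- language="C" -->
 * GstClockTimeDiff deadline =
 *     gst_aml_video_decoder_get_max_decode_time (decoder, frame);
 *
 * if (deadline < 0) {
 *   GST_DEBUG_OBJECT (decoder, "frame is late, skipping decode");
 *   // e.g. drop the frame instead of decoding it
 * }
 * ]|
 */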

/**
 * gst_aml_video_decoder_get_qos_proportion:
 * @decoder: a #GstAmlVideoDecoder
 *
 * Returns: The current QoS proportion.
 *
 * Since: 1.0.3
 */
gdouble
gst_aml_video_decoder_get_qos_proportion (GstAmlVideoDecoder * decoder)
{
  gdouble proportion;

  g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (decoder), 1.0);

  GST_OBJECT_LOCK (decoder);
  proportion = decoder->priv->proportion;
  GST_OBJECT_UNLOCK (decoder);

  return proportion;
}

GstFlowReturn
_gst_aml_video_decoder_error (GstAmlVideoDecoder * dec, gint weight,
    GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
    const gchar * function, gint line)
{
  if (txt)
    GST_WARNING_OBJECT (dec, "error: %s", txt);
  if (dbg)
    GST_WARNING_OBJECT (dec, "error: %s", dbg);
  dec->priv->error_count += weight;
  dec->priv->discont = TRUE;
  if (dec->priv->max_errors >= 0 &&
      dec->priv->error_count > dec->priv->max_errors) {
    gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
        domain, code, txt, dbg, file, function, line);
    return GST_FLOW_ERROR;
  } else {
    g_free (txt);
    g_free (dbg);
    return GST_FLOW_OK;
  }
}
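
/* This helper is normally not called directly. Assuming this port keeps
 * an upstream-style GST_AML_VIDEO_DECODER_ERROR convenience macro, a
 * subclass would report a weighted decode error like this (a sketch):
 *
 * |[<!-- language="C" -->
 * GST_AML_VIDEO_DECODER_ERROR (decoder, 1, STREAM, DECODE,
 *     ("Failed to decode frame"), ("corrupted bitstream"), ret);
 * ]|
 *
 * Each such error adds its weight to the error count until the configured
 * maximum (see gst_aml_video_decoder_set_max_errors()) turns it into a
 * fatal GST_FLOW_ERROR.
 */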

/**
 * gst_aml_video_decoder_set_max_errors:
 * @dec: a #GstAmlVideoDecoder
 * @num: max tolerated errors
 *
 * Sets the number of tolerated decoder errors. Tolerated errors are only
 * warned about; exceeding the tolerated count leads to a fatal error. You
 * can set -1 for never returning fatal errors. Default is set to
 * GST_AML_VIDEO_DECODER_MAX_ERRORS.
 *
 * The '-1' option was added in 1.4
 */
void
gst_aml_video_decoder_set_max_errors (GstAmlVideoDecoder * dec, gint num)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));

  dec->priv->max_errors = num;
}

/**
 * gst_aml_video_decoder_get_max_errors:
 * @dec: a #GstAmlVideoDecoder
 *
 * Returns: currently configured decoder tolerated error count.
 */
gint
gst_aml_video_decoder_get_max_errors (GstAmlVideoDecoder * dec)
{
  g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (dec), 0);

  return dec->priv->max_errors;
}

/**
 * gst_aml_video_decoder_set_needs_format:
 * @dec: a #GstAmlVideoDecoder
 * @enabled: new state
 *
 * Configures decoder format needs. If enabled, subclass needs to be
 * negotiated with format caps before it can process any data. It will then
 * never be handed any data before it has been configured.
 * Otherwise, it might be handed data without having been configured and
 * is then expected to be able to cope, either with defaults or based on
 * the input data.
 *
 * Since: 1.4
 */
void
gst_aml_video_decoder_set_needs_format (GstAmlVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));

  dec->priv->needs_format = enabled;
}

/**
 * gst_aml_video_decoder_get_needs_format:
 * @dec: a #GstAmlVideoDecoder
 *
 * Queries decoder required format handling.
 *
 * Returns: %TRUE if required format handling is enabled.
 *
 * Since: 1.4
 */
gboolean
gst_aml_video_decoder_get_needs_format (GstAmlVideoDecoder * dec)
{
  gboolean result;

  g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (dec), FALSE);

  result = dec->priv->needs_format;

  return result;
}

/**
 * gst_aml_video_decoder_set_packetized:
 * @decoder: a #GstAmlVideoDecoder
 * @packetized: whether the input data should be considered as packetized.
 *
 * Allows baseclass to consider input data as packetized or not. If the
 * input is packetized, then the @parse method will not be called.
 */
void
gst_aml_video_decoder_set_packetized (GstAmlVideoDecoder * decoder,
    gboolean packetized)
{
  decoder->priv->packetized = packetized;
}
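
/* Typically called once from the subclass init or @set_format function;
 * a sketch for a decoder whose upstream delivers one complete encoded
 * frame per buffer, so the base class never needs to invoke @parse:
 *
 * |[<!-- language="C" -->
 * gst_aml_video_decoder_set_packetized (decoder, TRUE);
 * ]|
 */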

/**
 * gst_aml_video_decoder_get_packetized:
 * @decoder: a #GstAmlVideoDecoder
 *
 * Queries whether input data is considered packetized or not by the
 * base class.
 *
 * Returns: TRUE if input data is considered packetized.
 */
gboolean
gst_aml_video_decoder_get_packetized (GstAmlVideoDecoder * decoder)
{
  return decoder->priv->packetized;
}

/**
 * gst_aml_video_decoder_have_last_subframe:
 * @decoder: a #GstAmlVideoDecoder
 * @frame: (transfer none): the #GstAmlVideoCodecFrame to update
 *
 * Indicates that the last subframe of @frame has been processed by the
 * decoder. This releases the current frame in the video decoder, allowing
 * new frames to be received from upstream elements. This method
 * must be called in the subclass @handle_frame callback.
 *
 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
 *
 * Since: 1.20
 */
GstFlowReturn
gst_aml_video_decoder_have_last_subframe (GstAmlVideoDecoder * decoder,
    GstAmlVideoCodecFrame * frame)
{
  g_return_val_if_fail (gst_aml_video_decoder_get_subframe_mode (decoder),
      GST_FLOW_OK);
  /* unref once from the list */
  GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->current_frame == frame) {
    gst_aml_video_codec_frame_unref (decoder->priv->current_frame);
    decoder->priv->current_frame = NULL;
  }
  GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return GST_FLOW_OK;
}

/**
 * gst_aml_video_decoder_set_subframe_mode:
 * @decoder: a #GstAmlVideoDecoder
 * @subframe_mode: whether the input data should be considered as subframes.
 *
 * If this is set to TRUE, it informs the base class that the subclass
 * can receive data at a granularity lower than one frame.
 *
 * Note that in this mode, the subclass has two options. It can either
 * require the presence of a GST_VIDEO_BUFFER_FLAG_MARKER to mark the
 * end of a frame. Or it can operate in such a way that it will decode
 * a single frame at a time. In this second case, every buffer that
 * arrives to the element is considered part of the same frame until
 * gst_aml_video_decoder_finish_frame() is called.
 *
 * In either case, the same #GstAmlVideoCodecFrame will be passed to the
 * GstAmlVideoDecoderClass:handle_frame vmethod repeatedly with a
 * different GstAmlVideoCodecFrame:input_buffer every time until the end of the
 * frame has been signaled using either method.
 * This method must be called during the decoder subclass @set_format call.
 *
 * Since: 1.20
 */
void
gst_aml_video_decoder_set_subframe_mode (GstAmlVideoDecoder * decoder,
    gboolean subframe_mode)
{
  decoder->priv->subframe_mode = subframe_mode;
}
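
/* A sketch of enabling subframe mode from a hypothetical subclass
 * @set_format implementation, as required by the documentation above:
 *
 * |[<!-- language="C" -->
 * static gboolean
 * my_decoder_set_format (GstAmlVideoDecoder * decoder,
 *     GstAmlVideoCodecState * state)
 * {
 *   // e.g. a slice-based decoder that consumes its input per slice
 *   gst_aml_video_decoder_set_subframe_mode (decoder, TRUE);
 *   return TRUE;
 * }
 * ]|
 */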

/**
 * gst_aml_video_decoder_get_subframe_mode:
 * @decoder: a #GstAmlVideoDecoder
 *
 * Queries whether input data is considered as subframes or not by the
 * base class. If FALSE, each input buffer will be considered as a full
 * frame.
 *
 * Returns: TRUE if input data is considered as subframes.
 *
 * Since: 1.20
 */
gboolean
gst_aml_video_decoder_get_subframe_mode (GstAmlVideoDecoder * decoder)
{
  return decoder->priv->subframe_mode;
}

/**
 * gst_aml_video_decoder_get_input_subframe_index:
 * @decoder: a #GstAmlVideoDecoder
 * @frame: (transfer none): the #GstAmlVideoCodecFrame to query
 *
 * Queries the number of the last subframe received in @frame by
 * the decoder baseclass.
 *
 * Returns: the current subframe index received in subframe mode, 1 otherwise.
 *
 * Since: 1.20
 */
guint
gst_aml_video_decoder_get_input_subframe_index (GstAmlVideoDecoder * decoder,
    GstAmlVideoCodecFrame * frame)
{
  return frame->abidata.ABI.num_subframes;
}

/**
 * gst_aml_video_decoder_get_processed_subframe_index:
 * @decoder: a #GstAmlVideoDecoder
 * @frame: (transfer none): the #GstAmlVideoCodecFrame to query
 *
 * Queries the number of subframes in @frame processed by
 * the decoder baseclass.
 *
 * Returns: the number of subframes processed so far in subframe mode.
 *
 * Since: 1.20
 */
guint
gst_aml_video_decoder_get_processed_subframe_index (GstAmlVideoDecoder * decoder,
    GstAmlVideoCodecFrame * frame)
{
  return frame->abidata.ABI.subframes_processed;
}
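
/* Sketch: inside a subframe-mode @handle_frame, the two counters above
 * can be compared to check whether subframes are still pending for the
 * current frame:
 *
 * |[<!-- language="C" -->
 * guint received =
 *     gst_aml_video_decoder_get_input_subframe_index (decoder, frame);
 * guint processed =
 *     gst_aml_video_decoder_get_processed_subframe_index (decoder, frame);
 *
 * if (processed < received) {
 *   // some received subframes have not been decoded yet
 * }
 * ]|
 */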

/**
 * gst_aml_video_decoder_set_estimate_rate:
 * @dec: a #GstAmlVideoDecoder
 * @enabled: whether to enable byte-to-time conversion
 *
 * Allows baseclass to perform estimated byte-to-time conversion.
 */
void
gst_aml_video_decoder_set_estimate_rate (GstAmlVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));

  dec->priv->do_estimate_rate = enabled;
}

/**
 * gst_aml_video_decoder_get_estimate_rate:
 * @dec: a #GstAmlVideoDecoder
 *
 * Returns: currently configured byte-to-time conversion setting
 */
gboolean
gst_aml_video_decoder_get_estimate_rate (GstAmlVideoDecoder * dec)
{
  g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (dec), 0);

  return dec->priv->do_estimate_rate;
}

/**
 * gst_aml_video_decoder_set_latency:
 * @decoder: a #GstAmlVideoDecoder
 * @min_latency: minimum latency
 * @max_latency: maximum latency
 *
 * Lets #GstAmlVideoDecoder sub-classes tell the baseclass what the decoder latency
 * is. If the provided values changed from previously provided ones, this will
 * also post a LATENCY message on the bus so the pipeline can reconfigure its
 * global latency.
 */
void
gst_aml_video_decoder_set_latency (GstAmlVideoDecoder * decoder,
    GstClockTime min_latency, GstClockTime max_latency)
{
  gboolean post_message = FALSE;

  g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
  g_return_if_fail (max_latency >= min_latency);

  GST_DEBUG_OBJECT (decoder,
      "min_latency:%" GST_TIME_FORMAT " max_latency:%" GST_TIME_FORMAT,
      GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));

  GST_OBJECT_LOCK (decoder);
  if (decoder->priv->min_latency != min_latency) {
    decoder->priv->min_latency = min_latency;
    post_message = TRUE;
  }
  if (decoder->priv->max_latency != max_latency) {
    decoder->priv->max_latency = max_latency;
    post_message = TRUE;
  }
  if (!decoder->priv->posted_latency_msg) {
    decoder->priv->posted_latency_msg = TRUE;
    post_message = TRUE;
  }
  GST_OBJECT_UNLOCK (decoder);

  if (post_message)
    gst_element_post_message (GST_ELEMENT_CAST (decoder),
        gst_message_new_latency (GST_OBJECT_CAST (decoder)));
}
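
/* Latency sketch: a decoder that reorders up to two frames could derive
 * its latency from the negotiated framerate. The two-frame reorder depth
 * is a made-up value for illustration.
 *
 * |[<!-- language="C" -->
 * GstVideoInfo *info = &state->info;
 *
 * if (info->fps_n > 0) {
 *   GstClockTime latency = gst_util_uint64_scale (2 * GST_SECOND,
 *       info->fps_d, info->fps_n);
 *   gst_aml_video_decoder_set_latency (decoder, latency, latency);
 * }
 * ]|
 */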

/**
 * gst_aml_video_decoder_get_latency:
 * @decoder: a #GstAmlVideoDecoder
 * @min_latency: (out) (allow-none): address of variable in which to store the
 *     configured minimum latency, or %NULL
 * @max_latency: (out) (allow-none): address of variable in which to store the
 *     configured maximum latency, or %NULL
 *
 * Query the configured decoder latency. Results will be returned via
 * @min_latency and @max_latency.
 */
void
gst_aml_video_decoder_get_latency (GstAmlVideoDecoder * decoder,
    GstClockTime * min_latency, GstClockTime * max_latency)
{
  GST_OBJECT_LOCK (decoder);
  if (min_latency)
    *min_latency = decoder->priv->min_latency;
  if (max_latency)
    *max_latency = decoder->priv->max_latency;
  GST_OBJECT_UNLOCK (decoder);
}

/**
 * gst_aml_video_decoder_merge_tags:
 * @decoder: a #GstAmlVideoDecoder
 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
 *     previously-set tags
 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
 *
 * Sets the video decoder tags and how they should be merged with any
 * upstream stream tags. This will override any tags previously-set
 * with gst_aml_video_decoder_merge_tags().
 *
 * Note that this is provided for convenience, and the subclass is
 * not required to use this and can still do tag handling on its own.
 *
 * MT safe.
 */
void
gst_aml_video_decoder_merge_tags (GstAmlVideoDecoder * decoder,
    const GstTagList * tags, GstTagMergeMode mode)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (decoder));
  g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
  g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);

  GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->tags != tags) {
    if (decoder->priv->tags) {
      gst_tag_list_unref (decoder->priv->tags);
      decoder->priv->tags = NULL;
      decoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    }
    if (tags) {
      decoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
      decoder->priv->tags_merge_mode = mode;
    }

    GST_DEBUG_OBJECT (decoder, "set decoder tags to %" GST_PTR_FORMAT, tags);
    decoder->priv->tags_changed = TRUE;
  }
  GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}
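
/* Tag sketch: advertising the codec name once the input format is known;
 * the tag value is illustrative only.
 *
 * |[<!-- language="C" -->
 * GstTagList *tags = gst_tag_list_new_empty ();
 *
 * gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
 *     GST_TAG_VIDEO_CODEC, "H.264 (Main Profile)", NULL);
 * gst_aml_video_decoder_merge_tags (decoder, tags, GST_TAG_MERGE_REPLACE);
 * gst_tag_list_unref (tags);
 * ]|
 */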

/**
 * gst_aml_video_decoder_get_buffer_pool:
 * @decoder: a #GstAmlVideoDecoder
 *
 * Returns: (transfer full): the instance of the #GstBufferPool used
 *     by the decoder; unref it after use
 */
GstBufferPool *
gst_aml_video_decoder_get_buffer_pool (GstAmlVideoDecoder * decoder)
{
  g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (decoder), NULL);

  if (decoder->priv->pool)
    return gst_object_ref (decoder->priv->pool);

  return NULL;
}

/**
 * gst_aml_video_decoder_get_allocator:
 * @decoder: a #GstAmlVideoDecoder
 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
 *     used
 * @params: (out) (allow-none) (transfer full): the
 *     #GstAllocationParams of @allocator
 *
 * Lets #GstAmlVideoDecoder sub-classes know the memory @allocator
 * used by the base class and its @params.
 *
 * Unref the @allocator after use.
 */
void
gst_aml_video_decoder_get_allocator (GstAmlVideoDecoder * decoder,
    GstAllocator ** allocator, GstAllocationParams * params)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (decoder));

  if (allocator)
    *allocator = decoder->priv->allocator ?
        gst_object_ref (decoder->priv->allocator) : NULL;

  if (params)
    *params = decoder->priv->params;
}

/**
 * gst_aml_video_decoder_set_use_default_pad_acceptcaps:
 * @decoder: a #GstAmlVideoDecoder
 * @use: if the default pad accept-caps query handling should be used
 *
 * Lets #GstAmlVideoDecoder sub-classes decide if they want the sink pad
 * to use the default pad query handler to reply to accept-caps queries.
 *
 * By setting this to %TRUE it is possible to further customize the default
 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
 * %GST_PAD_SET_ACCEPT_TEMPLATE.
 *
 * Since: 1.6
 */
void
gst_aml_video_decoder_set_use_default_pad_acceptcaps (GstAmlVideoDecoder * decoder,
    gboolean use)
{
  decoder->priv->use_default_pad_acceptcaps = use;
}
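
/* Sketch of combining this with the pad flags named above, e.g. from the
 * subclass init function. GST_AML_VIDEO_DECODER_SINK_PAD is assumed to be
 * this port's equivalent of the upstream sink-pad accessor macro.
 *
 * |[<!-- language="C" -->
 * gst_aml_video_decoder_set_use_default_pad_acceptcaps (decoder, TRUE);
 * GST_PAD_SET_ACCEPT_TEMPLATE (GST_AML_VIDEO_DECODER_SINK_PAD (decoder));
 * ]|
 */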

static void
gst_aml_video_decoder_request_sync_point_internal (GstAmlVideoDecoder * dec,
    GstClockTime deadline, GstAmlVideoDecoderRequestSyncPointFlags flags)
{
  GstEvent *fku = NULL;
  GstAmlVideoDecoderPrivate *priv;

  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));

  priv = dec->priv;

  GST_OBJECT_LOCK (dec);

  /* Check if we're allowed to send a new force-keyunit event.
   * frame->deadline is set to the running time of the PTS. */
  if (priv->min_force_key_unit_interval == 0 ||
      deadline == GST_CLOCK_TIME_NONE ||
      (priv->min_force_key_unit_interval != GST_CLOCK_TIME_NONE &&
          (priv->last_force_key_unit_time == GST_CLOCK_TIME_NONE
              || (priv->last_force_key_unit_time +
                  priv->min_force_key_unit_interval <= deadline)))) {
    GST_DEBUG_OBJECT (dec,
        "Requesting a new key-unit for frame with deadline %" GST_TIME_FORMAT,
        GST_TIME_ARGS (deadline));
    fku =
        gst_video_event_new_upstream_force_key_unit (GST_CLOCK_TIME_NONE, FALSE,
        0);
    priv->last_force_key_unit_time = deadline;
  } else {
    GST_DEBUG_OBJECT (dec,
        "Can't request a new key-unit for frame with deadline %"
        GST_TIME_FORMAT, GST_TIME_ARGS (deadline));
  }
  priv->request_sync_point_flags |= flags;
  /* We don't know yet the frame number of the sync point so set it to a
   * frame number higher than any allowed frame number */
  priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_PENDING;
  GST_OBJECT_UNLOCK (dec);

  if (fku)
    gst_pad_push_event (dec->sinkpad, fku);
}

/**
 * gst_aml_video_decoder_request_sync_point:
 * @dec: a #GstAmlVideoDecoder
 * @frame: a #GstAmlVideoCodecFrame
 * @flags: #GstAmlVideoDecoderRequestSyncPointFlags
 *
 * Allows the #GstAmlVideoDecoder subclass to request from the base class that
 * a new sync point should be requested from upstream, and that @frame was the
 * frame when the subclass noticed that a new sync point is required. A reason
 * for the subclass to do this could be missing reference frames, for example.
 *
 * The base class will then request a new sync point from upstream as long as
 * the time that passed since the last one exceeds
 * #GstAmlVideoDecoder:min-force-key-unit-interval.
 *
 * The subclass can signal via @flags how the frames until the next sync point
 * should be handled:
 *
 * * If %GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT is selected then
 *   all following input frames until the next sync point are discarded.
 *   This can be useful if the lack of a sync point will prevent all further
 *   decoding and the decoder implementation is not very robust in handling
 *   missing reference frames.
 * * If %GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT is selected
 *   then all output frames following @frame are marked as corrupted via
 *   %GST_BUFFER_FLAG_CORRUPTED. Corrupted frames can be automatically
 *   dropped by the base class, see #GstAmlVideoDecoder:discard-corrupted-frames.
 *   Subclasses can manually mark frames as corrupted via %GST_AML_VIDEO_CODEC_FRAME_FLAG_CORRUPTED
 *   before calling gst_aml_video_decoder_finish_frame().
 *
 * Since: 1.20
 */
void
gst_aml_video_decoder_request_sync_point (GstAmlVideoDecoder * dec,
    GstAmlVideoCodecFrame * frame, GstAmlVideoDecoderRequestSyncPointFlags flags)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));
  g_return_if_fail (frame != NULL);

  gst_aml_video_decoder_request_sync_point_internal (dec, frame->deadline, flags);
}
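
/* Sketch: a subclass that detects a broken reference chain while decoding
 * could ask for a new sync point and have the intervening input dropped.
 * reference_frame_missing stands in for whatever bitstream-level check
 * the subclass performs.
 *
 * |[<!-- language="C" -->
 * if (reference_frame_missing) {
 *   gst_aml_video_decoder_request_sync_point (decoder, frame,
 *       GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT);
 * }
 * ]|
 */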

/**
 * gst_aml_video_decoder_set_needs_sync_point:
 * @dec: a #GstAmlVideoDecoder
 * @enabled: new state
 *
 * Configures whether the decoder requires a sync point before it starts
 * outputting data in the beginning. If enabled, the base class will discard
 * all non-sync point frames in the beginning and after a flush and does not
 * pass them to the subclass.
 *
 * If the first frame is not a sync point, the base class will request a sync
 * point via the force-key-unit event.
 *
 * Since: 1.20
 */
void
gst_aml_video_decoder_set_needs_sync_point (GstAmlVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));

  dec->priv->needs_sync_point = enabled;
}

/**
 * gst_aml_video_decoder_get_needs_sync_point:
 * @dec: a #GstAmlVideoDecoder
 *
 * Queries if the decoder requires a sync point before it starts outputting
 * data in the beginning.
 *
 * Returns: %TRUE if a sync point is required in the beginning.
 *
 * Since: 1.20
 */
gboolean
gst_aml_video_decoder_get_needs_sync_point (GstAmlVideoDecoder * dec)
{
  gboolean result;

  g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (dec), FALSE);

  result = dec->priv->needs_sync_point;

  return result;
}