/* GStreamer
 * Copyright (C) 2008 David Schleef <ds@schleef.org>
 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
 *   Contact: Stefan Kost <stefan.kost@nokia.com>
 * Copyright (C) 2012 Collabora Ltd.
 *   Author : Edward Hervey <edward@collabora.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/**
 * SECTION:gstvideodecoder
 * @title: GstAmlVideoDecoder
 * @short_description: Base class for video decoders
 *
 * This base class is for video decoders turning encoded data into raw video
 * frames.
 *
 * The GstAmlVideoDecoder base class and derived subclasses should cooperate as
 * follows:
 *
 * ## Configuration
 *
 *   * Initially, GstAmlVideoDecoder calls @start when the decoder element
 *     is activated, which allows the subclass to perform any global setup.
 *
 *   * GstAmlVideoDecoder calls @set_format to inform the subclass of caps
 *     describing input video data that it is about to receive, including
 *     possibly configuration data.
 *     While unlikely, it might be called more than once, if changing input
 *     parameters requires reconfiguration.
 *
 *   * Incoming data buffers are processed as needed, described in Data
 *     Processing below.
 *
 *   * GstAmlVideoDecoder calls @stop at end of all processing.
 *
 * ## Data processing
 *
 *   * The base class gathers input data, and optionally allows subclass
 *     to parse this into subsequently manageable chunks, typically
 *     corresponding to and referred to as 'frames'.
 *
 *   * Each input frame is provided in turn to the subclass' @handle_frame
 *     callback.
 *   * When the subclass enables the subframe mode with
 *     `gst_aml_video_decoder_set_subframe_mode`, the base class will provide
 *     the same input frame with different input buffers to the subclass
 *     @handle_frame callback. During this call, the subclass needs to take
 *     ownership of the input_buffer as @GstAmlVideoCodecFrame.input_buffer
 *     will have been changed before the next subframe buffer is received.
 *     The subclass will call `gst_aml_video_decoder_have_last_subframe`
 *     when a new input frame can be created by the base class.
 *     Every subframe will share the same @GstAmlVideoCodecFrame.output_buffer
 *     to write the decoding result. The subclass is responsible for protecting
 *     its access.
 *
 *   * If codec processing results in decoded data, the subclass should call
 *     @gst_aml_video_decoder_finish_frame to have decoded data pushed
 *     downstream. In subframe mode the subclass should call
 *     @gst_aml_video_decoder_finish_subframe until the last subframe, where it
 *     should call @gst_aml_video_decoder_finish_frame. The subclass can detect
 *     the last subframe using GST_VIDEO_BUFFER_FLAG_MARKER on buffers or using
 *     its own logic to collect the subframes.
 *     In case of decoding failure, the subclass must call
 *     @gst_aml_video_decoder_drop_frame or @gst_aml_video_decoder_drop_subframe,
 *     to allow the base class to do timestamp and offset tracking, and possibly
 *     to requeue the frame for a later attempt in the case of reverse playback.
 *
 * ## Shutdown phase
 *
 *   * The GstAmlVideoDecoder class calls @stop to inform the subclass that data
 *     parsing will be stopped.
 *
 * ## Additional Notes
 *
 *   * Seeking/Flushing
 *
 *     * When the pipeline is seeked or otherwise flushed, the subclass is
 *       informed via a call to its @reset callback, with the hard parameter
 *       set to true. This indicates the subclass should drop any internal data
 *       queues and timestamps and prepare for a fresh set of buffers to arrive
 *       for parsing and decoding.
 *
 *   * End Of Stream
 *
 *     * At end-of-stream, the subclass @parse function may be called some final
 *       times with the at_eos parameter set to true, indicating that the
 *       element should not expect any more data to be arriving, and it should
 *       parse any remaining frames and call gst_aml_video_decoder_have_frame()
 *       if possible.
 *
 * The subclass is responsible for providing pad template caps for
 * source and sink pads. The pads need to be named "sink" and "src". It also
 * needs to provide information about the output caps, when they are known.
 * This may be when the base class calls the subclass' @set_format function,
 * though it might be during decoding, before calling
 * @gst_aml_video_decoder_finish_frame. This is done via
 * @gst_aml_video_decoder_set_output_state.
 *
 * The subclass is also responsible for providing (presentation) timestamps
 * (likely based on corresponding input ones). If that is not applicable
 * or possible, the base class provides limited framerate based interpolation.
 *
 * Similarly, the base class provides some limited (legacy) seeking support
 * if specifically requested by the subclass, as full-fledged support
 * should rather be left to upstream demuxer, parser or alike. This simple
 * approach caters for seeking and duration reporting using estimated input
 * bitrates. To enable it, a subclass should call
 * @gst_aml_video_decoder_set_estimate_rate to enable handling of incoming
 * byte-streams.
 *
 * The base class provides some support for reverse playback, in particular
 * in case incoming data is not packetized or upstream does not provide
 * fragments on keyframe boundaries. However, the subclass should then be
 * prepared for the parsing and frame processing stage to occur separately
 * (in normal forward processing, the latter immediately follows the former).
 * The subclass also needs to ensure the parsing stage properly marks
 * keyframes, unless it knows the upstream elements will do so properly for
 * incoming data.
 *
 * The bare minimum that a functional subclass needs to implement is:
 *
 *   * Provide pad templates
 *
 *   * Inform the base class of output caps via
 *     @gst_aml_video_decoder_set_output_state
 *
 *   * Parse input data, if it is not considered packetized from upstream.
 *     Data will be provided to @parse which should invoke
 *     @gst_aml_video_decoder_add_to_frame and @gst_aml_video_decoder_have_frame
 *     to separate the data belonging to each video frame.
 *
 *   * Accept data in @handle_frame and provide decoded results to
 *     @gst_aml_video_decoder_finish_frame, or call
 *     @gst_aml_video_decoder_drop_frame.
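 *
 * As a rough sketch, a packetized subclass' @handle_frame could be structured
 * as follows. FooDec and the decoding step itself are hypothetical, and
 * gst_aml_video_decoder_allocate_output_frame() is assumed to mirror the
 * stock GstVideoDecoder helper of the same name:
 *
 * |[<!-- language="C" -->
 * static GstFlowReturn
 * foo_dec_handle_frame (GstAmlVideoDecoder * dec, GstAmlVideoCodecFrame * frame)
 * {
 *   GstFlowReturn ret;
 *
 *   // Requires the output state to have been set beforehand, e.g. from
 *   // @set_format via gst_aml_video_decoder_set_output_state()
 *   ret = gst_aml_video_decoder_allocate_output_frame (dec, frame);
 *   if (ret != GST_FLOW_OK) {
 *     gst_aml_video_decoder_drop_frame (dec, frame);
 *     return ret;
 *   }
 *
 *   // ... decode frame->input_buffer into frame->output_buffer here ...
 *
 *   // Hands the frame back for timestamp tracking and downstream pushing
 *   return gst_aml_video_decoder_finish_frame (dec, frame);
 * }
 * ]|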
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

/* TODO
 *
 * * Add a flag/boolean for I-frame-only/image decoders so we can do extra
 *   features, like applying QoS on input (as opposed to after the frame is
 *   decoded).
 * * Add a flag/boolean for decoders that require keyframes, so the base
 *   class can automatically discard non-keyframes before one has arrived
 * * Detect reordered frame/timestamps and fix the pts/dts
 * * Support for GstIndex (or shall we not care ?)
 * * Calculate actual latency based on input/output timestamp/frame_number
 *   and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
 * * Emit latency message when it changes
 *
 */

/* Implementation notes:
 * The Video Decoder base class operates in 2 primary processing modes, depending
 * on whether forward or reverse playback is requested.
 *
 * Forward playback:
 *   * Incoming buffer -> @parse() -> add_to_frame()/have_frame() ->
 *     handle_frame() -> push downstream
 *
 * Reverse playback is more complicated, since it involves gathering incoming
 * data regions as we loop backwards through the upstream data. The processing
 * concept (using incoming buffers as containing one frame each to simplify
 * things) is:
 *
 * Upstream data we want to play:
 *  Buffer encoded order:  1  2  3  4  5  6  7  8  9  EOS
 *  Keyframe flag:            K        K
 *  Groupings:             AAAAAAA  BBBBBBB  CCCCCCC
 *
 * Input:
 *  Buffer reception order:  7  8  9  4  5  6  1  2  3  EOS
 *  Keyframe flag:                       K        K
 *  Discont flag:            D        D        D
 *
 * - Each Discont marks a discont in the decoding order.
 * - The keyframes mark where we can start decoding.
 *
 * Initially, we prepend incoming buffers to the gather queue. Whenever the
 * discont flag is set on an incoming buffer, the gather queue is flushed out
 * before the new buffer is collected.
 *
 * The above data will be accumulated in the gather queue like this:
 *
 *   gather queue:   9  8  7
 *                         D
 *
 * When buffer 4 is received (with a DISCONT), we flush the gather queue like
 * this:
 *
 *   while (gather)
 *     take head of queue and prepend to parse queue (this reverses the
 *     sequence, so parse queue is 7 -> 8 -> 9)
 *
 * Next, we process the parse queue, which now contains all un-parsed packets
 * (including any leftover ones from the previous decode section):
 *
 *   for each buffer now in the parse queue:
 *     Call the subclass parse function, prepending each resulting frame to
 *     the parse_gather queue. Buffers which precede the first one that
 *     produces a parsed frame are retained in the parse queue for
 *     re-processing on the next cycle of parsing.
 *
 * The parse_gather queue now contains frame objects ready for decoding,
 * in reverse order.
 *
 *   parse_gather: 9 -> 8 -> 7
 *
 *   while (parse_gather)
 *     Take the head of the queue and prepend it to the decode queue
 *     If the frame was a keyframe, process the decode queue
 *
 *   decode is now 7-8-9
 *
 * Processing the decode queue results in frames with attached output buffers
 * stored in the 'output_queue' ready for outputting in reverse order.
 *
 * After we flushed the gather queue and parsed it, we add 4 to the (now empty)
 * gather queue. We get the following situation:
 *
 *   gather queue:   4
 *   decode queue:   7  8  9
 *
 * After we received 5 (Keyframe) and 6:
 *
 *   gather queue:   6  5  4
 *   decode queue:   7  8  9
 *
 * When we receive 1 (DISCONT) which triggers a flush of the gather queue:
 *
 * Copy head of the gather queue (6) to decode queue:
 *
 *   gather queue:   5  4
 *   decode queue:   6  7  8  9
 *
 * Copy head of the gather queue (5) to decode queue. This is a keyframe so we
 * can start decoding.
 *
 *   gather queue:   4
 *   decode queue:   5  6  7  8  9
 *
 * Decode frames in decode queue, store raw decoded data in output queue; we
 * can take the head of the decode queue and prepend the decoded result in the
 * output queue:
 *
 *   gather queue:   4
 *   decode queue:
 *   output queue:   9  8  7  6  5
 *
 * Now output all the frames in the output queue, picking a frame from the
 * head of the queue.
 *
 * Copy head of the gather queue (4) to decode queue; we flushed the gather
 * queue and can now store the input buffer in the gather queue:
 *
 *   gather queue:   1
 *   decode queue:   4
 *
 * When we receive EOS, the queue looks like:
 *
 *   gather queue:   3  2  1
 *   decode queue:   4
 *
 * Fill decode queue, first keyframe we copy is 2:
 *
 *   gather queue:   1
 *   decode queue:   2  3  4
 *
 * Decoded output:
 *
 *   gather queue:   1
 *   decode queue:
 *   output queue:   4  3  2
 *
 * Leftover buffer 1 cannot be decoded and must be discarded.
 */
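
/* As a rough sketch of the gather-queue flush described above (the actual
 * reverse-playback handling is implemented around
 * gst_aml_video_decoder_flush_parse(); names here are simplified):
 *
 *   while (priv->gather) {
 *     GstBuffer *buf = priv->gather->data;
 *
 *     priv->gather = g_list_delete_link (priv->gather, priv->gather);
 *     // prepending reverses the gathered order, so the parse queue ends
 *     // up in decoding order (7 -> 8 -> 9 in the example above)
 *     priv->parse = g_list_prepend (priv->parse, buf);
 *   }
 */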

#include "gstamlvideodecoder.h"
#include "gstamlvideoutils.h"
#include "gstamlvideoutilsprivate.h"

#include <gst/video/video.h>
#include <gst/video/video-event.h>
#include <gst/video/gstvideopool.h>
#include <gst/video/gstvideometa.h>
#include <string.h>

GST_DEBUG_CATEGORY (videodecoder_debug);
#define GST_CAT_DEFAULT videodecoder_debug

/* properties */
#define DEFAULT_QOS TRUE
#define DEFAULT_MAX_ERRORS GST_AML_VIDEO_DECODER_MAX_ERRORS
#define DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL 0
#define DEFAULT_DISCARD_CORRUPTED_FRAMES FALSE
#define DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS FALSE
#define DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS (GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT | GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT)

/* Used for request_sync_point_frame_number. These are out of range for the
 * frame numbers and can be given special meaning */
#define REQUEST_SYNC_POINT_PENDING ((guint64) G_MAXUINT + 1)
#define REQUEST_SYNC_POINT_UNSET G_MAXUINT64

enum
{
  PROP_0,
  PROP_QOS,
  PROP_MAX_ERRORS,
  PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
  PROP_DISCARD_CORRUPTED_FRAMES,
  PROP_AUTOMATIC_REQUEST_SYNC_POINTS,
  PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
};

struct _GstAmlVideoDecoderPrivate
{
  /* FIXME introduce a context ? */

  GstBufferPool *pool;
  GstAllocator *allocator;
  GstAllocationParams params;

  /* parse tracking */
  /* input data */
  GstAdapter *input_adapter;
  /* assembles current frame */
  GstAdapter *output_adapter;

  /* Whether we attempt to convert newsegment from bytes to
   * time using a bitrate estimation */
  gboolean do_estimate_rate;

  /* Whether input is considered packetized or not */
  gboolean packetized;

  /* whether input is considered as subframes */
  gboolean subframe_mode;

  /* Error handling */
  gint max_errors;
  gint error_count;
  gboolean had_output_data;
  gboolean had_input_data;

  gboolean needs_format;
  /* whether input_segment and output_segment are identical */
  gboolean in_out_segment_sync;

  /* TRUE if we have an active set of instant rate flags */
  gboolean decode_flags_override;
  GstSegmentFlags decode_flags;

  /* ... being tracked here;
   * only available during parsing or when doing subframe decoding */
  GstAmlVideoCodecFrame *current_frame;
  /* events that should apply to the current frame */
  /* FIXME 2.0: Use a GQueue or similar, see GstAmlVideoCodecFrame::events */
  GList *current_frame_events;
  /* events that should be pushed before the next frame */
  /* FIXME 2.0: Use a GQueue or similar, see GstAmlVideoCodecFrame::events */
  GList *pending_events;

  /* relative offset of input data */
  guint64 input_offset;
  /* relative offset of frame */
  guint64 frame_offset;
  /* tracking ts and offsets */
  GQueue timestamps;

  /* last outgoing ts */
  GstClockTime last_timestamp_out;
  /* incoming pts - dts */
  GstClockTime pts_delta;
  gboolean reordered_output;

  /* FIXME: Consider using a GQueue or other better fitting data structure */
  /* reverse playback */
  /* collect input */
  GList *gather;
  /* to-be-parsed */
  GList *parse;
  /* collected parsed frames */
  GList *parse_gather;
  /* frames to be handled == decoded */
  GList *decode;
  /* collected output - of buffer objects, not frames */
  GList *output_queued;


  /* base_picture_number is the picture number of the reference picture */
  guint64 base_picture_number;
  /* combine with base_picture_number, framerate and calcs to yield (presentation) ts */
  GstClockTime base_timestamp;

  /* Properties */
  GstClockTime min_force_key_unit_interval;
  gboolean discard_corrupted_frames;

  /* Key unit related state */
  gboolean needs_sync_point;
  GstAmlVideoDecoderRequestSyncPointFlags request_sync_point_flags;
  guint64 request_sync_point_frame_number;
  GstClockTime last_force_key_unit_time;
  /* -1 if we saw no sync point yet */
  guint64 distance_from_sync;

  gboolean automatic_request_sync_points;
  GstAmlVideoDecoderRequestSyncPointFlags automatic_request_sync_point_flags;

  guint32 system_frame_number;
  guint32 decode_frame_number;

  GQueue frames;                /* Protected with OBJECT_LOCK */
  GstAmlVideoCodecState *input_state;
  GstAmlVideoCodecState *output_state;  /* OBJECT_LOCK and STREAM_LOCK */
  gboolean output_state_changed;

  /* QoS properties */
  gboolean do_qos;
  gdouble proportion;           /* OBJECT_LOCK */
  GstClockTime earliest_time;   /* OBJECT_LOCK */
  GstClockTime qos_frame_duration;      /* OBJECT_LOCK */
  gboolean discont;
  /* qos messages: frames dropped/processed */
  guint dropped;
  guint processed;

  /* Outgoing byte size ? */
  gint64 bytes_out;
  gint64 time;

  gint64 min_latency;
  gint64 max_latency;

  /* Tracks whether the latency message was posted at least once */
  gboolean posted_latency_msg;

  /* upstream stream tags (global tags are passed through as-is) */
  GstTagList *upstream_tags;

  /* subclass tags */
  GstTagList *tags;
  GstTagMergeMode tags_merge_mode;

  gboolean tags_changed;

  /* flags */
  gboolean use_default_pad_acceptcaps;

#ifndef GST_DISABLE_DEBUG
  /* Diagnostic time for reporting the time
   * from flush to first output */
  GstClockTime last_reset_time;
#endif
};

static GstElementClass *parent_class = NULL;
static gint private_offset = 0;

/* cached quark to avoid contention on the global quark table lock */
#define META_TAG_VIDEO meta_tag_video_quark
static GQuark meta_tag_video_quark;

static void gst_aml_video_decoder_class_init (GstAmlVideoDecoderClass * klass);
static void gst_aml_video_decoder_init (GstAmlVideoDecoder * dec,
    GstAmlVideoDecoderClass * klass);

static void gst_aml_video_decoder_finalize (GObject * object);
static void gst_aml_video_decoder_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec);
static void gst_aml_video_decoder_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec);

static gboolean gst_aml_video_decoder_setcaps (GstAmlVideoDecoder * dec,
    GstCaps * caps);
static gboolean gst_aml_video_decoder_sink_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static gboolean gst_aml_video_decoder_src_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static GstFlowReturn gst_aml_video_decoder_chain (GstPad * pad, GstObject * parent,
    GstBuffer * buf);
static gboolean gst_aml_video_decoder_sink_query (GstPad * pad, GstObject * parent,
    GstQuery * query);
static GstStateChangeReturn gst_aml_video_decoder_change_state (GstElement *
    element, GstStateChange transition);
static gboolean gst_aml_video_decoder_src_query (GstPad * pad, GstObject * parent,
    GstQuery * query);
static void gst_aml_video_decoder_reset (GstAmlVideoDecoder * decoder, gboolean full,
    gboolean flush_hard);

static GstFlowReturn gst_aml_video_decoder_decode_frame (GstAmlVideoDecoder * decoder,
    GstAmlVideoCodecFrame * frame);

static void gst_aml_video_decoder_push_event_list (GstAmlVideoDecoder * decoder,
    GList * events);
static GstClockTime gst_aml_video_decoder_get_frame_duration (GstAmlVideoDecoder *
    decoder, GstAmlVideoCodecFrame * frame);
static GstAmlVideoCodecFrame *gst_aml_video_decoder_new_frame (GstAmlVideoDecoder *
    decoder);
static GstFlowReturn gst_aml_video_decoder_clip_and_push_buf (GstAmlVideoDecoder *
    decoder, GstBuffer * buf);
static GstFlowReturn gst_aml_video_decoder_flush_parse (GstAmlVideoDecoder * dec,
    gboolean at_eos);

static void gst_aml_video_decoder_clear_queues (GstAmlVideoDecoder * dec);

static gboolean gst_aml_video_decoder_sink_event_default (GstAmlVideoDecoder * decoder,
    GstEvent * event);
static gboolean gst_aml_video_decoder_src_event_default (GstAmlVideoDecoder * decoder,
    GstEvent * event);
static gboolean gst_aml_video_decoder_decide_allocation_default (GstAmlVideoDecoder *
    decoder, GstQuery * query);
static gboolean gst_aml_video_decoder_propose_allocation_default (GstAmlVideoDecoder *
    decoder, GstQuery * query);
static gboolean gst_aml_video_decoder_negotiate_default (GstAmlVideoDecoder * decoder);
static GstFlowReturn gst_aml_video_decoder_parse_available (GstAmlVideoDecoder * dec,
    gboolean at_eos, gboolean new_buffer);
static gboolean gst_aml_video_decoder_negotiate_unlocked (GstAmlVideoDecoder *
    decoder);
static gboolean gst_aml_video_decoder_sink_query_default (GstAmlVideoDecoder * decoder,
    GstQuery * query);
static gboolean gst_aml_video_decoder_src_query_default (GstAmlVideoDecoder * decoder,
    GstQuery * query);

static gboolean gst_aml_video_decoder_transform_meta_default (GstAmlVideoDecoder *
    decoder, GstAmlVideoCodecFrame * frame, GstMeta * meta);

static gboolean gst_aml_video_decoder_handle_missing_data_default (GstAmlVideoDecoder *
    decoder, GstClockTime timestamp, GstClockTime duration);

static void gst_aml_video_decoder_copy_metas (GstAmlVideoDecoder * decoder,
    GstAmlVideoCodecFrame * frame, GstBuffer * src_buffer,
    GstBuffer * dest_buffer);

static void gst_aml_video_decoder_request_sync_point_internal (GstAmlVideoDecoder *
    dec, GstClockTime deadline, GstAmlVideoDecoderRequestSyncPointFlags flags);

/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
 * method to get to the padtemplates */
GType
gst_aml_video_decoder_get_type (void)
{
  static gsize type = 0;

  if (g_once_init_enter (&type)) {
    GType _type;
    static const GTypeInfo info = {
      sizeof (GstAmlVideoDecoderClass),
      NULL,
      NULL,
      (GClassInitFunc) gst_aml_video_decoder_class_init,
      NULL,
      NULL,
      sizeof (GstAmlVideoDecoder),
      0,
      (GInstanceInitFunc) gst_aml_video_decoder_init,
    };

    _type = g_type_register_static (GST_TYPE_ELEMENT,
        "GstAmlVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);

    private_offset =
        g_type_add_instance_private (_type, sizeof (GstAmlVideoDecoderPrivate));

    g_once_init_leave (&type, _type);
  }
  return type;
}

static inline GstAmlVideoDecoderPrivate *
gst_aml_video_decoder_get_instance_private (GstAmlVideoDecoder * self)
{
  return (G_STRUCT_MEMBER_P (self, private_offset));
}

static void
gst_aml_video_decoder_class_init (GstAmlVideoDecoderClass * klass)
{
  GObjectClass *gobject_class;
  GstElementClass *gstelement_class;

  gobject_class = G_OBJECT_CLASS (klass);
  gstelement_class = GST_ELEMENT_CLASS (klass);

  GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "amlvideodecoder", 0,
      "Base Video Decoder");

  parent_class = g_type_class_peek_parent (klass);

  if (private_offset != 0)
    g_type_class_adjust_private_offset (klass, &private_offset);

  gobject_class->finalize = gst_aml_video_decoder_finalize;
  gobject_class->get_property = gst_aml_video_decoder_get_property;
  gobject_class->set_property = gst_aml_video_decoder_set_property;

  gstelement_class->change_state =
      GST_DEBUG_FUNCPTR (gst_aml_video_decoder_change_state);

  klass->sink_event = gst_aml_video_decoder_sink_event_default;
  klass->src_event = gst_aml_video_decoder_src_event_default;
  klass->decide_allocation = gst_aml_video_decoder_decide_allocation_default;
  klass->propose_allocation = gst_aml_video_decoder_propose_allocation_default;
  klass->negotiate = gst_aml_video_decoder_negotiate_default;
  klass->sink_query = gst_aml_video_decoder_sink_query_default;
  klass->src_query = gst_aml_video_decoder_src_query_default;
  klass->transform_meta = gst_aml_video_decoder_transform_meta_default;
  klass->handle_missing_data = gst_aml_video_decoder_handle_missing_data_default;

  /**
   * GstAmlVideoDecoder:qos:
   *
   * If set to %TRUE the decoder will handle QoS events received
   * from downstream elements.
   * This includes dropping output frames which are detected as late
   * using the metrics reported by those events.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_QOS,
      g_param_spec_boolean ("qos", "Quality of Service",
          "Handle Quality-of-Service events from downstream",
          DEFAULT_QOS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAmlVideoDecoder:max-errors:
   *
   * Maximum number of tolerated consecutive decode errors. See
   * gst_aml_video_decoder_set_max_errors() for more details.
   *
   * Since: 1.18
   */
  g_object_class_install_property (gobject_class, PROP_MAX_ERRORS,
      g_param_spec_int ("max-errors", "Max errors",
          "Max consecutive decoder errors before returning flow error",
          -1, G_MAXINT, DEFAULT_MAX_ERRORS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAmlVideoDecoder:min-force-key-unit-interval:
   *
   * Minimum interval between force-key-unit events sent upstream by the
   * decoder. Setting this to 0 will cause every event to be handled, setting
   * this to %GST_CLOCK_TIME_NONE will cause every event to be ignored.
   *
   * See gst_video_event_new_upstream_force_key_unit() for more details about
   * force-key-unit events.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
      g_param_spec_uint64 ("min-force-key-unit-interval",
          "Minimum Force Keyunit Interval",
          "Minimum interval between force-keyunit requests in nanoseconds", 0,
          G_MAXUINT64, DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAmlVideoDecoder:discard-corrupted-frames:
   *
   * If set to %TRUE the decoder will discard frames that are marked as
   * corrupted instead of outputting them.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class, PROP_DISCARD_CORRUPTED_FRAMES,
      g_param_spec_boolean ("discard-corrupted-frames",
          "Discard Corrupted Frames",
          "Discard frames marked as corrupted instead of outputting them",
          DEFAULT_DISCARD_CORRUPTED_FRAMES,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAmlVideoDecoder:automatic-request-sync-points:
   *
   * If set to %TRUE the decoder will automatically request sync points when
   * it seems like a good idea, e.g. if the first frames are not key frames or
   * if packet loss was reported by upstream.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_AUTOMATIC_REQUEST_SYNC_POINTS,
      g_param_spec_boolean ("automatic-request-sync-points",
          "Automatic Request Sync Points",
          "Automatically request sync points when it would be useful",
          DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /**
   * GstAmlVideoDecoder:automatic-request-sync-point-flags:
   *
   * GstAmlVideoDecoderRequestSyncPointFlags to use for the automatically
   * requested sync points if `automatic-request-sync-points` is enabled.
   *
   * Since: 1.20
   */
  g_object_class_install_property (gobject_class,
      PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
      g_param_spec_flags ("automatic-request-sync-point-flags",
          "Automatic Request Sync Point Flags",
          "Flags to use when automatically requesting sync points",
          GST_TYPE_VIDEO_DECODER_REQUEST_SYNC_POINT_FLAGS,
          DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  meta_tag_video_quark = g_quark_from_static_string (GST_META_TAG_VIDEO_STR);
}
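
/* For reference, the properties installed above can be tuned on any decoder
 * instance in the usual GObject way, e.g. to fail fast on broken streams
 * (values purely illustrative):
 *
 *   g_object_set (decoder,
 *       "max-errors", 3,
 *       "discard-corrupted-frames", TRUE,
 *       "automatic-request-sync-points", TRUE,
 *       NULL);
 */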

static void
gst_aml_video_decoder_init (GstAmlVideoDecoder * decoder, GstAmlVideoDecoderClass * klass)
{
  GstPadTemplate *pad_template;
  GstPad *pad;

  GST_DEBUG_OBJECT (decoder, "gst_aml_video_decoder_init");

  decoder->priv = gst_aml_video_decoder_get_instance_private (decoder);

  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
  g_return_if_fail (pad_template != NULL);

  decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");

  gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_aml_video_decoder_chain));
  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_aml_video_decoder_sink_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_aml_video_decoder_sink_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);

  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
  g_return_if_fail (pad_template != NULL);

  decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");

  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_aml_video_decoder_src_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_aml_video_decoder_src_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);

  gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
  gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);

  g_rec_mutex_init (&decoder->stream_lock);

  decoder->priv->input_adapter = gst_adapter_new ();
  decoder->priv->output_adapter = gst_adapter_new ();
  decoder->priv->packetized = TRUE;
  decoder->priv->needs_format = FALSE;

  g_queue_init (&decoder->priv->frames);
  g_queue_init (&decoder->priv->timestamps);

  /* properties */
  decoder->priv->do_qos = DEFAULT_QOS;
  decoder->priv->max_errors = GST_AML_VIDEO_DECODER_MAX_ERRORS;

  decoder->priv->min_latency = 0;
  decoder->priv->max_latency = 0;

  decoder->priv->automatic_request_sync_points =
      DEFAULT_AUTOMATIC_REQUEST_SYNC_POINTS;
  decoder->priv->automatic_request_sync_point_flags =
      DEFAULT_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS;

  gst_aml_video_decoder_reset (decoder, TRUE, TRUE);
}

static GstAmlVideoCodecState *
_new_input_state (GstCaps * caps)
{
  GstAmlVideoCodecState *state;
  GstStructure *structure;
  const GValue *codec_data;

  state = g_slice_new0 (GstAmlVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
    goto parse_fail;
  state->caps = gst_caps_ref (caps);

  structure = gst_caps_get_structure (caps, 0);

  codec_data = gst_structure_get_value (structure, "codec_data");
  if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
    state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));

  return state;

parse_fail:
  {
    g_slice_free (GstAmlVideoCodecState, state);
    return NULL;
  }
}

static GstAmlVideoCodecState *
_new_output_state (GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode,
    guint width, guint height, GstAmlVideoCodecState * reference,
    gboolean copy_interlace_mode)
{
  GstAmlVideoCodecState *state;

  state = g_slice_new0 (GstAmlVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (!gst_video_info_set_interlaced_format (&state->info, fmt, interlace_mode,
          width, height)) {
    g_slice_free (GstAmlVideoCodecState, state);
    return NULL;
  }

  if (reference) {
    GstVideoInfo *tgt, *ref;

    tgt = &state->info;
    ref = &reference->info;

    /* Copy over extra fields from reference state */
    if (copy_interlace_mode)
      tgt->interlace_mode = ref->interlace_mode;
    tgt->flags = ref->flags;
    tgt->chroma_site = ref->chroma_site;
    tgt->colorimetry = ref->colorimetry;
    GST_DEBUG ("reference par %d/%d fps %d/%d",
        ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
    tgt->par_n = ref->par_n;
    tgt->par_d = ref->par_d;
    tgt->fps_n = ref->fps_n;
    tgt->fps_d = ref->fps_d;
    tgt->views = ref->views;

    GST_VIDEO_INFO_FIELD_ORDER (tgt) = GST_VIDEO_INFO_FIELD_ORDER (ref);

    if (GST_VIDEO_INFO_MULTIVIEW_MODE (ref) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) =
          GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
    } else {
      /* Default to MONO, overridden as needed by sub-classes */
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_MULTIVIEW_MODE_MONO;
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;
    }
  }

  GST_DEBUG ("output par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);

  return state;
}

static gboolean
gst_aml_video_decoder_setcaps (GstAmlVideoDecoder * decoder, GstCaps * caps)
{
  GstAmlVideoDecoderClass *decoder_class;
  GstAmlVideoCodecState *state;
  gboolean ret = TRUE;

  decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);

  GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);

  GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);

  if (decoder->priv->input_state) {
    GST_DEBUG_OBJECT (decoder,
        "Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
        decoder->priv->input_state->caps, caps);
    if (gst_caps_is_equal (decoder->priv->input_state->caps, caps))
      goto caps_not_changed;
  }

  state = _new_input_state (caps);

  if (G_UNLIKELY (state == NULL))
    goto parse_fail;

  if (decoder_class->set_format)
    ret = decoder_class->set_format (decoder, state);

  if (!ret)
    goto refused_format;

  if (decoder->priv->input_state)
    gst_aml_video_codec_state_unref (decoder->priv->input_state);
  decoder->priv->input_state = state;

  GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;

caps_not_changed:
  {
    GST_DEBUG_OBJECT (decoder, "Caps did not change - ignore");
    GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    return TRUE;
  }

  /* ERRORS */
parse_fail:
  {
    GST_WARNING_OBJECT (decoder, "Failed to parse caps");
    GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    return FALSE;
  }

refused_format:
  {
    GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    GST_WARNING_OBJECT (decoder, "Subclass refused caps");
    gst_aml_video_codec_state_unref (state);
    return FALSE;
  }
}

static void
gst_aml_video_decoder_finalize (GObject * object)
{
  GstAmlVideoDecoder *decoder;

  decoder = GST_AML_VIDEO_DECODER (object);

  GST_DEBUG_OBJECT (object, "finalize");

  g_rec_mutex_clear (&decoder->stream_lock);

  if (decoder->priv->input_adapter) {
    g_object_unref (decoder->priv->input_adapter);
    decoder->priv->input_adapter = NULL;
  }
  if (decoder->priv->output_adapter) {
    g_object_unref (decoder->priv->output_adapter);
    decoder->priv->output_adapter = NULL;
  }

  if (decoder->priv->input_state)
    gst_aml_video_codec_state_unref (decoder->priv->input_state);
  if (decoder->priv->output_state)
    gst_aml_video_codec_state_unref (decoder->priv->output_state);

  if (decoder->priv->pool) {
    gst_object_unref (decoder->priv->pool);
    decoder->priv->pool = NULL;
  }

  if (decoder->priv->allocator) {
    gst_object_unref (decoder->priv->allocator);
    decoder->priv->allocator = NULL;
  }

  G_OBJECT_CLASS (parent_class)->finalize (object);
}

static void
gst_aml_video_decoder_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec)
{
  GstAmlVideoDecoder *dec = GST_AML_VIDEO_DECODER (object);
  GstAmlVideoDecoderPrivate *priv = dec->priv;

  switch (property_id) {
    case PROP_QOS:
      g_value_set_boolean (value, priv->do_qos);
      break;
    case PROP_MAX_ERRORS:
      g_value_set_int (value, gst_aml_video_decoder_get_max_errors (dec));
      break;
    case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
      g_value_set_uint64 (value, priv->min_force_key_unit_interval);
      break;
    case PROP_DISCARD_CORRUPTED_FRAMES:
      g_value_set_boolean (value, priv->discard_corrupted_frames);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINTS:
      g_value_set_boolean (value, priv->automatic_request_sync_points);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS:
      g_value_set_flags (value, priv->automatic_request_sync_point_flags);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}

static void
gst_aml_video_decoder_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec)
{
  GstAmlVideoDecoder *dec = GST_AML_VIDEO_DECODER (object);
  GstAmlVideoDecoderPrivate *priv = dec->priv;

  switch (property_id) {
    case PROP_QOS:
      priv->do_qos = g_value_get_boolean (value);
      break;
    case PROP_MAX_ERRORS:
      gst_aml_video_decoder_set_max_errors (dec, g_value_get_int (value));
      break;
    case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
      priv->min_force_key_unit_interval = g_value_get_uint64 (value);
      break;
    case PROP_DISCARD_CORRUPTED_FRAMES:
      priv->discard_corrupted_frames = g_value_get_boolean (value);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINTS:
      priv->automatic_request_sync_points = g_value_get_boolean (value);
      break;
    case PROP_AUTOMATIC_REQUEST_SYNC_POINT_FLAGS:
      priv->automatic_request_sync_point_flags = g_value_get_flags (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
      break;
  }
}

/* hard == FLUSH, otherwise discont */
static GstFlowReturn
gst_aml_video_decoder_flush (GstAmlVideoDecoder * dec, gboolean hard)
{
  GstAmlVideoDecoderClass *klass = GST_AML_VIDEO_DECODER_GET_CLASS (dec);
  GstFlowReturn ret = GST_FLOW_OK;

  GST_LOG_OBJECT (dec, "flush hard %d", hard);

  /* Inform subclass */
  if (klass->reset) {
    GST_FIXME_OBJECT (dec, "GstAmlVideoDecoder::reset() is deprecated");
    klass->reset (dec, hard);
  }

  if (klass->flush)
    klass->flush (dec);

  /* and get (re)set for the sequel */
  gst_aml_video_decoder_reset (dec, FALSE, hard);

  return ret;
}

static GstEvent *
gst_aml_video_decoder_create_merged_tags_event (GstAmlVideoDecoder * dec)
{
  GstTagList *merged_tags;

  GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
  GST_LOG_OBJECT (dec, "decoder  : %" GST_PTR_FORMAT, dec->priv->tags);
  GST_LOG_OBJECT (dec, "mode     : %d", dec->priv->tags_merge_mode);

  merged_tags =
      gst_tag_list_merge (dec->priv->upstream_tags, dec->priv->tags,
      dec->priv->tags_merge_mode);

  GST_DEBUG_OBJECT (dec, "merged   : %" GST_PTR_FORMAT, merged_tags);

  if (merged_tags == NULL)
    return NULL;

  if (gst_tag_list_is_empty (merged_tags)) {
    gst_tag_list_unref (merged_tags);
    return NULL;
  }

  return gst_event_new_tag (merged_tags);
}

static gboolean
gst_aml_video_decoder_push_event (GstAmlVideoDecoder * decoder, GstEvent * event)
{
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEGMENT:
    {
      GstSegment segment;

      gst_event_copy_segment (event, &segment);

      GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);

      if (segment.format != GST_FORMAT_TIME) {
        GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
        break;
      }

      GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
      decoder->output_segment = segment;
      decoder->priv->in_out_segment_sync =
          gst_segment_is_equal (&decoder->input_segment, &segment);
      decoder->priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
      decoder->priv->earliest_time = GST_CLOCK_TIME_NONE;
      GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      break;
    }
    default:
      break;
  }

  GST_DEBUG_OBJECT (decoder, "pushing event %s",
      gst_event_type_get_name (GST_EVENT_TYPE (event)));

  return gst_pad_push_event (decoder->srcpad, event);
}

static GstFlowReturn
gst_aml_video_decoder_parse_available (GstAmlVideoDecoder * dec, gboolean at_eos,
    gboolean new_buffer)
{
  GstAmlVideoDecoderClass *decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (dec);
  GstAmlVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;
  gsize was_available, available;
  guint inactive = 0;

  available = gst_adapter_available (priv->input_adapter);

  while (available || new_buffer) {
    new_buffer = FALSE;
    /* current frame may have been parsed and handled,
     * so we need to set up a new one when asking subclass to parse */
    if (priv->current_frame == NULL)
      priv->current_frame = gst_aml_video_decoder_new_frame (dec);

    was_available = available;
    ret = decoder_class->parse (dec, priv->current_frame,
        priv->input_adapter, at_eos);
    if (ret != GST_FLOW_OK)
      break;

    /* if the subclass returned success (GST_FLOW_OK), it is expected
     * to have collected and submitted a frame, i.e. it should have
     * called gst_aml_video_decoder_have_frame(), or at least consumed a
     * few bytes through gst_aml_video_decoder_add_to_frame().
     *
     * Otherwise, this is an implementation bug, and we error out
     * after 2 failed attempts */
    available = gst_adapter_available (priv->input_adapter);
    if (!priv->current_frame || available != was_available)
      inactive = 0;
    else if (++inactive == 2)
      goto error_inactive;
  }

  return ret;

  /* ERRORS */
error_inactive:
  {
    GST_ERROR_OBJECT (dec, "Failed to consume data. Error in subclass?");
    return GST_FLOW_ERROR;
  }
}
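
/* As a rough sketch of the parse() contract above, a subclass implementation
 * for unpacketized input typically scans for a frame boundary and then hands
 * the bytes over. scan_for_frame_end() is hypothetical, and the NEED_DATA
 * return is assumed to mirror stock GstVideoDecoder's
 * GST_VIDEO_DECODER_FLOW_NEED_DATA:
 *
 *   static GstFlowReturn
 *   foo_dec_parse (GstAmlVideoDecoder * dec, GstAmlVideoCodecFrame * frame,
 *       GstAdapter * adapter, gboolean at_eos)
 *   {
 *     gsize avail = gst_adapter_available (adapter);
 *     gsize frame_size = scan_for_frame_end (adapter, avail, at_eos);
 *
 *     if (frame_size == 0)
 *       return GST_AML_VIDEO_DECODER_FLOW_NEED_DATA; // wait for more input
 *
 *     // consume exactly one frame's worth of bytes from the adapter ...
 *     gst_aml_video_decoder_add_to_frame (dec, frame_size);
 *     // ... and submit it, which hands the frame to handle_frame()
 *     return gst_aml_video_decoder_have_frame (dec);
 *   }
 */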

/* This function has to be called with the stream lock taken. */
static GstFlowReturn
gst_aml_video_decoder_drain_out (GstAmlVideoDecoder * dec, gboolean at_eos)
{
  GstAmlVideoDecoderClass *decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (dec);
  GstAmlVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;

  if (dec->input_segment.rate > 0.0) {
    /* Forward mode, if unpacketized, give the child class
     * a final chance to flush out packets */
    if (!priv->packetized) {
      ret = gst_aml_video_decoder_parse_available (dec, TRUE, FALSE);
    }

    if (at_eos) {
      if (decoder_class->finish)
        ret = decoder_class->finish (dec);
    } else {
      if (decoder_class->drain) {
        ret = decoder_class->drain (dec);
      } else {
        GST_FIXME_OBJECT (dec, "Sub-class should implement drain()");
      }
    }
  } else {
    /* Reverse playback mode */
    ret = gst_aml_video_decoder_flush_parse (dec, TRUE);
  }

  return ret;
}

static GList *
_flush_events (GstPad * pad, GList * events)
{
  GList *tmp;

  for (tmp = events; tmp; tmp = tmp->next) {
    if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
        GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
        GST_EVENT_IS_STICKY (tmp->data)) {
      gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
    }
    gst_event_unref (tmp->data);
  }
  g_list_free (events);

  return NULL;
}

/* Must be called holding the GST_AML_VIDEO_DECODER_STREAM_LOCK */
static gboolean
gst_aml_video_decoder_negotiate_default_caps (GstAmlVideoDecoder * decoder)
{
  GstCaps *caps, *templcaps;
  GstAmlVideoCodecState *state;
  GstVideoInfo info;
  gint i;
  gint caps_size;
  GstStructure *structure;

  templcaps = gst_pad_get_pad_template_caps (decoder->srcpad);
  caps = gst_pad_peer_query_caps (decoder->srcpad, templcaps);
  if (caps)
    gst_caps_unref (templcaps);
  else
    caps = templcaps;
  templcaps = NULL;

  if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
    goto caps_error;

  GST_LOG_OBJECT (decoder, "peer caps %" GST_PTR_FORMAT, caps);

  /* before fixating, try to use whatever upstream provided */
  caps = gst_caps_make_writable (caps);
  caps_size = gst_caps_get_size (caps);
  if (decoder->priv->input_state && decoder->priv->input_state->caps) {
    GstCaps *sinkcaps = decoder->priv->input_state->caps;
    GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
    gint width, height;

    if (gst_structure_get_int (structure, "width", &width)) {
      for (i = 0; i < caps_size; i++) {
        gst_structure_set (gst_caps_get_structure (caps, i), "width",
            G_TYPE_INT, width, NULL);
      }
    }

    if (gst_structure_get_int (structure, "height", &height)) {
      for (i = 0; i < caps_size; i++) {
        gst_structure_set (gst_caps_get_structure (caps, i), "height",
            G_TYPE_INT, height, NULL);
      }
    }
  }

  for (i = 0; i < caps_size; i++) {
    structure = gst_caps_get_structure (caps, i);
    /* Random I420 1280x720 for fixation */
    if (gst_structure_has_field (structure, "format"))
      gst_structure_fixate_field_string (structure, "format", "I420");
    else
      gst_structure_set (structure, "format", G_TYPE_STRING, "I420", NULL);

    if (gst_structure_has_field (structure, "width"))
      gst_structure_fixate_field_nearest_int (structure, "width", 1280);
    else
      gst_structure_set (structure, "width", G_TYPE_INT, 1280, NULL);

    if (gst_structure_has_field (structure, "height"))
      gst_structure_fixate_field_nearest_int (structure, "height", 720);
    else
      gst_structure_set (structure, "height", G_TYPE_INT, 720, NULL);
  }
  caps = gst_caps_fixate (caps);

  if (!caps || !gst_video_info_from_caps (&info, caps))
    goto caps_error;

  GST_INFO_OBJECT (decoder,
      "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
  state =
      gst_aml_video_decoder_set_output_state (decoder, info.finfo->format,
      info.width, info.height, decoder->priv->input_state);
  gst_aml_video_codec_state_unref (state);
  gst_caps_unref (caps);

  return TRUE;

caps_error:
  {
    if (caps)
      gst_caps_unref (caps);
    return FALSE;
  }
}

static gboolean
gst_aml_video_decoder_handle_missing_data_default (GstAmlVideoDecoder *
    decoder, GstClockTime timestamp, GstClockTime duration)
{
  GstAmlVideoDecoderPrivate *priv;

  priv = decoder->priv;

  if (priv->automatic_request_sync_points) {
    GstClockTime deadline =
        gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
        timestamp);

    GST_DEBUG_OBJECT (decoder,
        "Requesting sync point for missing data at running time %"
        GST_TIME_FORMAT " timestamp %" GST_TIME_FORMAT " with duration %"
        GST_TIME_FORMAT, GST_TIME_ARGS (deadline), GST_TIME_ARGS (timestamp),
        GST_TIME_ARGS (duration));

    gst_aml_video_decoder_request_sync_point_internal (decoder, deadline,
        priv->automatic_request_sync_point_flags);
  }

  return TRUE;
}
1338
1339static gboolean
1340gst_aml_video_decoder_sink_event_default (GstAmlVideoDecoder * decoder,
1341 GstEvent * event)
1342{
1343 GstAmlVideoDecoderClass *decoder_class;
1344 GstAmlVideoDecoderPrivate *priv;
1345 gboolean ret = FALSE;
1346 gboolean forward_immediate = FALSE;
1347
1348 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
1349
1350 priv = decoder->priv;
1351
1352 switch (GST_EVENT_TYPE (event)) {
1353 case GST_EVENT_STREAM_START:
1354 {
1355 GstFlowReturn flow_ret = GST_FLOW_OK;
1356
1357 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1358 flow_ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
1359 ret = (flow_ret == GST_FLOW_OK);
1360
1361 GST_DEBUG_OBJECT (decoder, "received STREAM_START. Clearing taglist");
1362 /* Flush upstream tags after a STREAM_START */
1363 if (priv->upstream_tags) {
1364 gst_tag_list_unref (priv->upstream_tags);
1365 priv->upstream_tags = NULL;
1366 priv->tags_changed = TRUE;
1367 }
1368 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1369
1370 /* Forward STREAM_START immediately. Everything is drained after
1371 * the STREAM_START event and we can forward this event immediately
1372 * now without having buffers out of order.
1373 */
1374 forward_immediate = TRUE;
1375 break;
1376 }
1377 case GST_EVENT_CAPS:
1378 {
1379 GstCaps *caps;
1380
1381 gst_event_parse_caps (event, &caps);
1382 ret = gst_aml_video_decoder_setcaps (decoder, caps);
1383 gst_event_unref (event);
1384 event = NULL;
1385 break;
1386 }
1387 case GST_EVENT_SEGMENT_DONE:
1388 {
1389 GstFlowReturn flow_ret = GST_FLOW_OK;
1390
1391 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1392 flow_ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
1393 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1394 ret = (flow_ret == GST_FLOW_OK);
1395
1396 /* Forward SEGMENT_DONE immediately. This is required
1397 * because no buffer or serialized event might come
1398 * after SEGMENT_DONE and nothing could trigger another
1399 * _finish_frame() call.
1400 *
1401 * The subclass can override this behaviour by overriding
1402 * the ::sink_event() vfunc and not chaining up to the
1403 * parent class' ::sink_event() until a later time.
1404 */
1405 forward_immediate = TRUE;
1406 break;
1407 }
1408 case GST_EVENT_EOS:
1409 {
1410 GstFlowReturn flow_ret = GST_FLOW_OK;
1411
1412 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1413 flow_ret = gst_aml_video_decoder_drain_out (decoder, TRUE);
1414 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1415 ret = (flow_ret == GST_FLOW_OK);
1416
1417 /* Error out even if EOS was ok when we had input, but no output */
1418 if (ret && priv->had_input_data && !priv->had_output_data) {
1419 GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
1420 ("No valid frames decoded before end of stream"),
1421 ("no valid frames found"));
1422 }
1423
1424 /* Forward EOS immediately. This is required because no
1425 * buffer or serialized event will come after EOS and
1426 * nothing could trigger another _finish_frame() call.
1427 *
1428 * The subclass can override this behaviour by overriding
1429 * the ::sink_event() vfunc and not chaining up to the
1430 * parent class' ::sink_event() until a later time.
1431 */
1432 forward_immediate = TRUE;
1433 break;
1434 }
1435 case GST_EVENT_GAP:
1436 {
1437 GstClockTime timestamp, duration;
1438 GstGapFlags gap_flags = 0;
1439 GstFlowReturn flow_ret = GST_FLOW_OK;
1440 gboolean needs_reconfigure = FALSE;
1441 GList *events;
1442 GList *frame_events;
1443
1444 gst_event_parse_gap (event, &timestamp, &duration);
1445 gst_event_parse_gap_flags (event, &gap_flags);
1446
1447 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1448 /* If this is not missing data, or the subclass does not handle it
1449 * specifically, then drain out the decoder and forward the event
1450 * directly. */
1451 if ((gap_flags & GST_GAP_FLAG_MISSING_DATA) == 0
1452 || !decoder_class->handle_missing_data
1453 || decoder_class->handle_missing_data (decoder, timestamp,
1454 duration)) {
1455 if (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)
1456 flow_ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
1457 ret = (flow_ret == GST_FLOW_OK);
1458
1459 /* Ensure we have caps before forwarding the event */
1460 if (!decoder->priv->output_state) {
1461 if (!gst_aml_video_decoder_negotiate_default_caps (decoder)) {
1462 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1463 GST_ELEMENT_ERROR (decoder, STREAM, FORMAT, (NULL),
1464 ("Decoder output not negotiated before GAP event."));
1465 forward_immediate = TRUE;
1466 break;
1467 }
1468 needs_reconfigure = TRUE;
1469 }
1470
1471 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad)
1472 || needs_reconfigure;
1473 if (decoder->priv->output_state_changed || needs_reconfigure) {
1474 if (!gst_aml_video_decoder_negotiate_unlocked (decoder)) {
1475 GST_WARNING_OBJECT (decoder, "Failed to negotiate with downstream");
1476 gst_pad_mark_reconfigure (decoder->srcpad);
1477 }
1478 }
1479
1480 GST_DEBUG_OBJECT (decoder, "Pushing all pending serialized events"
1481 " before the gap");
1482 events = decoder->priv->pending_events;
1483 frame_events = decoder->priv->current_frame_events;
1484 decoder->priv->pending_events = NULL;
1485 decoder->priv->current_frame_events = NULL;
1486
1487 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1488
1489 gst_aml_video_decoder_push_event_list (decoder, events);
1490 gst_aml_video_decoder_push_event_list (decoder, frame_events);
1491
1492 /* Forward GAP immediately. Everything is drained after
1493 * the GAP event and we can forward this event immediately
1494 * now without having buffers out of order.
1495 */
1496 forward_immediate = TRUE;
1497 } else {
1498 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1499 gst_clear_event (&event);
1500 }
1501 break;
1502 }
1503 case GST_EVENT_CUSTOM_DOWNSTREAM:
1504 {
1505 gboolean in_still;
1506 GstFlowReturn flow_ret = GST_FLOW_OK;
1507
1508 if (gst_video_event_parse_still_frame (event, &in_still)) {
1509 if (in_still) {
1510 GST_DEBUG_OBJECT (decoder, "draining current data for still-frame");
1511 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1512 flow_ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
1513 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1514 ret = (flow_ret == GST_FLOW_OK);
1515 }
1516 /* Forward STILL_FRAME immediately. Everything is drained after
1517 * the STILL_FRAME event and we can forward this event immediately
1518 * now without having buffers out of order.
1519 */
1520 forward_immediate = TRUE;
1521 }
1522 break;
1523 }
1524 case GST_EVENT_SEGMENT:
1525 {
1526 GstSegment segment;
1527
1528 gst_event_copy_segment (event, &segment);
1529
1530 if (segment.format == GST_FORMAT_TIME) {
1531 GST_DEBUG_OBJECT (decoder,
1532 "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1533 } else {
1534 gint64 start;
1535
1536 GST_DEBUG_OBJECT (decoder,
1537 "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1538
1539 /* handle newsegment as a result from our legacy simple seeking */
1540 /* note that initial 0 should convert to 0 in any case */
1541 if (priv->do_estimate_rate &&
1542 gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
1543 segment.start, GST_FORMAT_TIME, &start)) {
1544 /* best attempt convert */
1545 /* as these are only estimates, stop is kept open-ended to avoid
1546 * premature cutting */
1547 GST_DEBUG_OBJECT (decoder,
1548 "converted to TIME start %" GST_TIME_FORMAT,
1549 GST_TIME_ARGS (start));
1550 segment.start = start;
1551 segment.stop = GST_CLOCK_TIME_NONE;
1552 segment.time = start;
1553 /* replace event */
1554 gst_event_unref (event);
1555 event = gst_event_new_segment (&segment);
1556 } else {
1557 goto newseg_wrong_format;
1558 }
1559 }
1560
1561 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1562
1563 /* Update the decode flags in the segment if we have an instant-rate
1564 * override active */
1565 GST_OBJECT_LOCK (decoder);
1566 if (!priv->decode_flags_override)
1567 priv->decode_flags = segment.flags;
1568 else {
1569 segment.flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1570 segment.flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1571 }
1572
1573 priv->base_timestamp = GST_CLOCK_TIME_NONE;
1574 priv->base_picture_number = 0;
1575
1576 decoder->input_segment = segment;
1577 decoder->priv->in_out_segment_sync = FALSE;
1578
1579 GST_OBJECT_UNLOCK (decoder);
1580 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1581
1582 break;
1583 }
1584 case GST_EVENT_INSTANT_RATE_CHANGE:
1585 {
1586 GstSegmentFlags flags;
1587 GstSegment *seg;
1588
1589 gst_event_parse_instant_rate_change (event, NULL, &flags);
1590
1591 GST_OBJECT_LOCK (decoder);
1592 priv->decode_flags_override = TRUE;
1593 priv->decode_flags = flags;
1594
1595 /* Update the input segment flags */
1596 seg = &decoder->input_segment;
1597 seg->flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1598 seg->flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1599 GST_OBJECT_UNLOCK (decoder);
1600 break;
1601 }
1602 case GST_EVENT_FLUSH_STOP:
1603 {
1604 GList *l;
1605
1606 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1607 for (l = priv->frames.head; l; l = l->next) {
1608 GstAmlVideoCodecFrame *frame = l->data;
1609
1610 frame->events = _flush_events (decoder->srcpad, frame->events);
1611 }
1612 priv->current_frame_events = _flush_events (decoder->srcpad,
1613 decoder->priv->current_frame_events);
1614
1615 /* well, this is kind of worse than a DISCONT */
1616 gst_aml_video_decoder_flush (decoder, TRUE);
1617 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1618 /* Forward FLUSH_STOP immediately. This is required because it is
1619 * expected to be forwarded immediately and no buffers are queued
1620 * anyway.
1621 */
1622 forward_immediate = TRUE;
1623 break;
1624 }
1625 case GST_EVENT_TAG:
1626 {
1627 GstTagList *tags;
1628
1629 gst_event_parse_tag (event, &tags);
1630
1631 if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
1632 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1633 if (priv->upstream_tags != tags) {
1634 if (priv->upstream_tags)
1635 gst_tag_list_unref (priv->upstream_tags);
1636 priv->upstream_tags = gst_tag_list_ref (tags);
1637 GST_INFO_OBJECT (decoder, "upstream tags: %" GST_PTR_FORMAT, tags);
1638 }
1639 gst_event_unref (event);
1640 event = gst_aml_video_decoder_create_merged_tags_event (decoder);
1641 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1642 if (!event)
1643 ret = TRUE;
1644 }
1645 break;
1646 }
1647 default:
1648 break;
1649 }
1650
1651 /* Forward non-serialized events immediately, as well as any other
1652 * event decided above to be safe to forward right away without
1653 * potentially going out of order with other events and
1654 * buffers.
1655 */
1656 if (event) {
1657 if (!GST_EVENT_IS_SERIALIZED (event) || forward_immediate) {
1658 ret = gst_aml_video_decoder_push_event (decoder, event);
1659 } else {
1660 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
1661 decoder->priv->current_frame_events =
1662 g_list_prepend (decoder->priv->current_frame_events, event);
1663 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1664 ret = TRUE;
1665 }
1666 }
1667
1668 return ret;
1669
1670newseg_wrong_format:
1671 {
1672 GST_DEBUG_OBJECT (decoder, "received non-TIME newsegment");
1673 gst_event_unref (event);
1674 /* SWALLOW EVENT */
1675 return TRUE;
1676 }
1677}
1678
1679static gboolean
1680gst_aml_video_decoder_sink_event (GstPad * pad, GstObject * parent,
1681 GstEvent * event)
1682{
1683 GstAmlVideoDecoder *decoder;
1684 GstAmlVideoDecoderClass *decoder_class;
1685 gboolean ret = FALSE;
1686
1687 decoder = GST_AML_VIDEO_DECODER (parent);
1688 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
1689
1690 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1691 GST_EVENT_TYPE_NAME (event));
1692
1693 if (decoder_class->sink_event)
1694 ret = decoder_class->sink_event (decoder, event);
1695
1696 return ret;
1697}
1698
1699/* perform upstream byte <-> time conversion (duration, seeking)
1700 * if subclass allows and if enough data for moderately decent conversion */
1701static inline gboolean
1702gst_aml_video_decoder_do_byte (GstAmlVideoDecoder * dec)
1703{
1704 gboolean ret;
1705
1706 GST_OBJECT_LOCK (dec);
1707 ret = dec->priv->do_estimate_rate && (dec->priv->bytes_out > 0)
1708 && (dec->priv->time > GST_SECOND);
1709 GST_OBJECT_UNLOCK (dec);
1710
1711 return ret;
1712}
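
/* Illustrative sketch (not part of this file): the byte <-> time estimate
 * gated by _do_byte() above is a plain average-bitrate extrapolation over
 * the data seen so far, roughly:
 *
 *   static GstClockTime
 *   bytes_to_time (guint64 bytes, guint64 bytes_out, GstClockTime time)
 *   {
 *     // assumes bytes_out > 0 and time > GST_SECOND, as checked above
 *     return gst_util_uint64_scale (bytes, time, bytes_out);
 *   }
 *
 * The real conversion is done by __gst_aml_video_encoded_video_convert()
 * in the CONVERT query handlers below; the exact rounding may differ.
 */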
1713
1714static gboolean
1715gst_aml_video_decoder_do_seek (GstAmlVideoDecoder * dec, GstEvent * event)
1716{
1717 GstFormat format;
1718 GstSeekFlags flags;
1719 GstSeekType start_type, end_type;
1720 gdouble rate;
1721 gint64 start, start_time, end_time;
1722 GstSegment seek_segment;
1723 guint32 seqnum;
1724
1725 gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
1726 &start_time, &end_type, &end_time);
1727
1728 /* we'll handle plain open-ended flushing seeks with the simple approach */
1729 if (rate != 1.0) {
1730 GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
1731 return FALSE;
1732 }
1733
1734 if (start_type != GST_SEEK_TYPE_SET) {
1735 GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
1736 return FALSE;
1737 }
1738
1739 if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
1740 (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
1741 GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
1742 return FALSE;
1743 }
1744
1745 if (!(flags & GST_SEEK_FLAG_FLUSH)) {
1746 GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
1747 return FALSE;
1748 }
1749
1750 memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
1751 gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
1752 start_time, end_type, end_time, NULL);
1753 start_time = seek_segment.position;
1754
1755 if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
1756 GST_FORMAT_BYTES, &start)) {
1757 GST_DEBUG_OBJECT (dec, "conversion failed");
1758 return FALSE;
1759 }
1760
1761 seqnum = gst_event_get_seqnum (event);
1762 event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
1763 GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
1764 gst_event_set_seqnum (event, seqnum);
1765
1766 GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
1767 G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
1768
1769 return gst_pad_push_event (dec->sinkpad, event);
1770}
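
/* Illustrative sketch: the kind of application seek this simple path can
 * serve is a plain, flushing, open-ended TIME seek, e.g.:
 *
 *   gst_element_seek_simple (pipeline, GST_FORMAT_TIME,
 *       GST_SEEK_FLAG_FLUSH, 30 * GST_SECOND);
 *
 * If upstream refuses it in TIME format, _do_seek() above converts the
 * target position to BYTES using the estimated rate and retries upstream.
 */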
1771
1772static gboolean
1773gst_aml_video_decoder_src_event_default (GstAmlVideoDecoder * decoder,
1774 GstEvent * event)
1775{
1776 GstAmlVideoDecoderPrivate *priv;
1777 gboolean res = FALSE;
1778
1779 priv = decoder->priv;
1780
1781 GST_DEBUG_OBJECT (decoder,
1782 "received event %d, %s", GST_EVENT_TYPE (event),
1783 GST_EVENT_TYPE_NAME (event));
1784
1785 switch (GST_EVENT_TYPE (event)) {
1786 case GST_EVENT_SEEK:
1787 {
1788 GstFormat format;
1789 gdouble rate;
1790 GstSeekFlags flags;
1791 GstSeekType start_type, stop_type;
1792 gint64 start, stop;
1793 gint64 tstart, tstop;
1794 guint32 seqnum;
1795
1796 gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
1797 &stop_type, &stop);
1798 seqnum = gst_event_get_seqnum (event);
1799
1800 /* upstream gets a chance first */
1801 if ((res = gst_pad_push_event (decoder->sinkpad, event)))
1802 break;
1803
1804 /* if upstream fails for a time seek, maybe we can help if allowed */
1805 if (format == GST_FORMAT_TIME) {
1806 if (gst_aml_video_decoder_do_byte (decoder))
1807 res = gst_aml_video_decoder_do_seek (decoder, event);
1808 break;
1809 }
1810
1811 /* ... though a non-time seek can be aided as well */
1812 /* First bring the requested format to time */
1813 if (!(res =
1814 gst_pad_query_convert (decoder->srcpad, format, start,
1815 GST_FORMAT_TIME, &tstart)))
1816 goto convert_error;
1817 if (!(res =
1818 gst_pad_query_convert (decoder->srcpad, format, stop,
1819 GST_FORMAT_TIME, &tstop)))
1820 goto convert_error;
1821
1822 /* then seek with time on the peer */
1823 event = gst_event_new_seek (rate, GST_FORMAT_TIME,
1824 flags, start_type, tstart, stop_type, tstop);
1825 gst_event_set_seqnum (event, seqnum);
1826
1827 res = gst_pad_push_event (decoder->sinkpad, event);
1828 break;
1829 }
1830 case GST_EVENT_QOS:
1831 {
1832 GstQOSType type;
1833 gdouble proportion;
1834 GstClockTimeDiff diff;
1835 GstClockTime timestamp;
1836
1837 gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);
1838
1839 GST_OBJECT_LOCK (decoder);
1840 priv->proportion = proportion;
1841 if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
1842 if (G_UNLIKELY (diff > 0)) {
1843 priv->earliest_time = timestamp + 2 * diff + priv->qos_frame_duration;
1844 } else {
1845 priv->earliest_time = timestamp + diff;
1846 }
1847 } else {
1848 priv->earliest_time = GST_CLOCK_TIME_NONE;
1849 }
1850 GST_OBJECT_UNLOCK (decoder);
1851
1852 GST_DEBUG_OBJECT (decoder,
1853 "got QoS %" GST_TIME_FORMAT ", %" GST_STIME_FORMAT ", %g",
1854 GST_TIME_ARGS (timestamp), GST_STIME_ARGS (diff), proportion);
1855
1856 res = gst_pad_push_event (decoder->sinkpad, event);
1857 break;
1858 }
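
/* Illustrative numbers for the QoS bookkeeping above (values made up):
 * with a frame at timestamp 1.0s reported diff = +20ms late and
 * qos_frame_duration = 40ms,
 *
 *   earliest_time = 1.0s + 2 * 20ms + 40ms = 1.08s
 *
 * i.e. frames with a deadline before 1.08s may be dropped, while an early
 * frame (diff < 0) simply moves earliest_time back by the slack.
 */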
1859 default:
1860 res = gst_pad_push_event (decoder->sinkpad, event);
1861 break;
1862 }
1863done:
1864 return res;
1865
1866convert_error:
1867 GST_DEBUG_OBJECT (decoder, "could not convert format");
1868 goto done;
1869}
1870
1871static gboolean
1872gst_aml_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1873{
1874 GstAmlVideoDecoder *decoder;
1875 GstAmlVideoDecoderClass *decoder_class;
1876 gboolean ret = FALSE;
1877
1878 decoder = GST_AML_VIDEO_DECODER (parent);
1879 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
1880
1881 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1882 GST_EVENT_TYPE_NAME (event));
1883
1884 if (decoder_class->src_event)
1885 ret = decoder_class->src_event (decoder, event);
1886
1887 return ret;
1888}
1889
1890static gboolean
1891gst_aml_video_decoder_src_query_default (GstAmlVideoDecoder * dec, GstQuery * query)
1892{
1893 GstPad *pad = GST_AML_VIDEO_DECODER_SRC_PAD (dec);
1894 gboolean res = TRUE;
1895
1896 GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
1897
1898 switch (GST_QUERY_TYPE (query)) {
1899 case GST_QUERY_POSITION:
1900 {
1901 GstFormat format;
1902 gint64 time, value;
1903
1904 /* upstream gets a chance first */
1905 if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
1906 GST_LOG_OBJECT (dec, "returning peer response");
1907 break;
1908 }
1909
1910 /* Refuse BYTES format queries. If it made sense to
1911 * answer them, upstream would have already */
1912 gst_query_parse_position (query, &format, NULL);
1913
1914 if (format == GST_FORMAT_BYTES) {
1915 GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
1916 break;
1917 }
1918
1919 /* we start from the last seen time */
1920 time = dec->priv->last_timestamp_out;
1921 /* correct for the segment values */
1922 time = gst_segment_to_stream_time (&dec->output_segment,
1923 GST_FORMAT_TIME, time);
1924
1925 GST_LOG_OBJECT (dec,
1926 "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
1927
1928 /* and convert to the final format */
1929 if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
1930 format, &value)))
1931 break;
1932
1933 gst_query_set_position (query, format, value);
1934
1935 GST_LOG_OBJECT (dec,
1936 "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
1937 format);
1938 break;
1939 }
1940 case GST_QUERY_DURATION:
1941 {
1942 GstFormat format;
1943
1944 /* upstream in any case */
1945 if ((res = gst_pad_query_default (pad, GST_OBJECT (dec), query)))
1946 break;
1947
1948 gst_query_parse_duration (query, &format, NULL);
1949 /* try answering TIME by converting from BYTE if subclass allows */
1950 if (format == GST_FORMAT_TIME && gst_aml_video_decoder_do_byte (dec)) {
1951 gint64 value;
1952
1953 if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
1954 &value)) {
1955 GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
1956 if (gst_pad_query_convert (dec->sinkpad,
1957 GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
1958 gst_query_set_duration (query, GST_FORMAT_TIME, value);
1959 res = TRUE;
1960 }
1961 }
1962 }
1963 break;
1964 }
1965 case GST_QUERY_CONVERT:
1966 {
1967 GstFormat src_fmt, dest_fmt;
1968 gint64 src_val, dest_val;
1969
1970 GST_DEBUG_OBJECT (dec, "convert query");
1971
1972 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1973 GST_OBJECT_LOCK (dec);
1974 if (dec->priv->output_state != NULL)
1975 res = __gst_aml_video_rawvideo_convert (dec->priv->output_state,
1976 src_fmt, src_val, &dest_fmt, &dest_val);
1977 else
1978 res = FALSE;
1979 GST_OBJECT_UNLOCK (dec);
1980 if (!res)
1981 goto error;
1982 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
1983 break;
1984 }
1985 case GST_QUERY_LATENCY:
1986 {
1987 gboolean live;
1988 GstClockTime min_latency, max_latency;
1989
1990 res = gst_pad_peer_query (dec->sinkpad, query);
1991 if (res) {
1992 gst_query_parse_latency (query, &live, &min_latency, &max_latency);
1993 GST_DEBUG_OBJECT (dec, "Peer qlatency: live %d, min %"
1994 GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
1995 GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
1996
1997 GST_OBJECT_LOCK (dec);
1998 min_latency += dec->priv->min_latency;
1999 if (max_latency == GST_CLOCK_TIME_NONE
2000 || dec->priv->max_latency == GST_CLOCK_TIME_NONE)
2001 max_latency = GST_CLOCK_TIME_NONE;
2002 else
2003 max_latency += dec->priv->max_latency;
2004 GST_OBJECT_UNLOCK (dec);
2005
2006 gst_query_set_latency (query, live, min_latency, max_latency);
2007 }
2008 }
2009 break;
2010 default:
2011 res = gst_pad_query_default (pad, GST_OBJECT (dec), query);
2012 }
2013 return res;
2014
2015error:
2016 GST_ERROR_OBJECT (dec, "query failed");
2017 return res;
2018}
2019
2020static gboolean
2021gst_aml_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
2022{
2023 GstAmlVideoDecoder *decoder;
2024 GstAmlVideoDecoderClass *decoder_class;
2025 gboolean ret = FALSE;
2026
2027 decoder = GST_AML_VIDEO_DECODER (parent);
2028 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
2029
2030 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2031 GST_QUERY_TYPE_NAME (query));
2032
2033 if (decoder_class->src_query)
2034 ret = decoder_class->src_query (decoder, query);
2035
2036 return ret;
2037}
2038
2039/**
2040 * gst_aml_video_decoder_proxy_getcaps:
2041 * @decoder: a #GstAmlVideoDecoder
2042 * @caps: (allow-none): initial caps
2043 * @filter: (allow-none): filter caps
2044 *
2045 * Returns caps that express @caps (or sink template caps if @caps == NULL)
2046 * restricted to resolution/format/... combinations supported by downstream
2047 * elements.
2048 *
2049 * Returns: (transfer full): a #GstCaps owned by caller
2050 *
2051 * Since: 1.6
2052 */
2053GstCaps *
2054gst_aml_video_decoder_proxy_getcaps (GstAmlVideoDecoder * decoder, GstCaps * caps,
2055 GstCaps * filter)
2056{
2057 return __gst_aml_video_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
2058 GST_AML_VIDEO_DECODER_SINK_PAD (decoder),
2059 GST_AML_VIDEO_DECODER_SRC_PAD (decoder), caps, filter);
2060}
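
/* Illustrative sketch (hypothetical subclass): a decoder that wants its
 * sink caps restricted to what downstream supports would implement the
 * getcaps vfunc as:
 *
 *   static GstCaps *
 *   my_dec_getcaps (GstAmlVideoDecoder * dec, GstCaps * filter)
 *   {
 *     // NULL starts from the sink pad template caps
 *     return gst_aml_video_decoder_proxy_getcaps (dec, NULL, filter);
 *   }
 *
 * which is also the default behaviour, see _sink_getcaps() below.
 */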
2061
2062static GstCaps *
2063gst_aml_video_decoder_sink_getcaps (GstAmlVideoDecoder * decoder, GstCaps * filter)
2064{
2065 GstAmlVideoDecoderClass *klass;
2066 GstCaps *caps;
2067
2068 klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
2069
2070 if (klass->getcaps)
2071 caps = klass->getcaps (decoder, filter);
2072 else
2073 caps = gst_aml_video_decoder_proxy_getcaps (decoder, NULL, filter);
2074
2075 GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
2076
2077 return caps;
2078}
2079
2080static gboolean
2081gst_aml_video_decoder_sink_query_default (GstAmlVideoDecoder * decoder,
2082 GstQuery * query)
2083{
2084 GstPad *pad = GST_AML_VIDEO_DECODER_SINK_PAD (decoder);
2085 GstAmlVideoDecoderPrivate *priv;
2086 gboolean res = FALSE;
2087
2088 priv = decoder->priv;
2089
2090 GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);
2091
2092 switch (GST_QUERY_TYPE (query)) {
2093 case GST_QUERY_CONVERT:
2094 {
2095 GstFormat src_fmt, dest_fmt;
2096 gint64 src_val, dest_val;
2097
2098 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
2099 GST_OBJECT_LOCK (decoder);
2100 res =
2101 __gst_aml_video_encoded_video_convert (priv->bytes_out, priv->time,
2102 src_fmt, src_val, &dest_fmt, &dest_val);
2103 GST_OBJECT_UNLOCK (decoder);
2104 if (!res)
2105 goto error;
2106 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
2107 break;
2108 }
2109 case GST_QUERY_ALLOCATION:{
2110 GstAmlVideoDecoderClass *klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
2111
2112 if (klass->propose_allocation)
2113 res = klass->propose_allocation (decoder, query);
2114 break;
2115 }
2116 case GST_QUERY_CAPS:{
2117 GstCaps *filter, *caps;
2118
2119 gst_query_parse_caps (query, &filter);
2120 caps = gst_aml_video_decoder_sink_getcaps (decoder, filter);
2121 gst_query_set_caps_result (query, caps);
2122 gst_caps_unref (caps);
2123 res = TRUE;
2124 break;
2125 }
2126 case GST_QUERY_ACCEPT_CAPS:{
2127 if (decoder->priv->use_default_pad_acceptcaps) {
2128 res =
2129 gst_pad_query_default (GST_AML_VIDEO_DECODER_SINK_PAD (decoder),
2130 GST_OBJECT_CAST (decoder), query);
2131 } else {
2132 GstCaps *caps;
2133 GstCaps *allowed_caps;
2134 GstCaps *template_caps;
2135 gboolean accept;
2136
2137 gst_query_parse_accept_caps (query, &caps);
2138
2139 template_caps = gst_pad_get_pad_template_caps (pad);
2140 accept = gst_caps_is_subset (caps, template_caps);
2141 gst_caps_unref (template_caps);
2142
2143 if (accept) {
2144 allowed_caps =
2145 gst_pad_query_caps (GST_AML_VIDEO_DECODER_SINK_PAD (decoder), caps);
2146
2147 accept = gst_caps_can_intersect (caps, allowed_caps);
2148
2149 gst_caps_unref (allowed_caps);
2150 }
2151
2152 gst_query_set_accept_caps_result (query, accept);
2153 res = TRUE;
2154 }
2155 break;
2156 }
2157 default:
2158 res = gst_pad_query_default (pad, GST_OBJECT (decoder), query);
2159 break;
2160 }
2161done:
2162
2163 return res;
2164error:
2165 GST_DEBUG_OBJECT (decoder, "query failed");
2166 goto done;
2167
2168}
2169
2170static gboolean
2171gst_aml_video_decoder_sink_query (GstPad * pad, GstObject * parent,
2172 GstQuery * query)
2173{
2174 GstAmlVideoDecoder *decoder;
2175 GstAmlVideoDecoderClass *decoder_class;
2176 gboolean ret = FALSE;
2177
2178 decoder = GST_AML_VIDEO_DECODER (parent);
2179 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
2180
2181 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2182 GST_QUERY_TYPE_NAME (query));
2183
2184 if (decoder_class->sink_query)
2185 ret = decoder_class->sink_query (decoder, query);
2186
2187 return ret;
2188}
2189
2190typedef struct _Timestamp Timestamp;
2191struct _Timestamp
2192{
2193 guint64 offset;
2194 GstClockTime pts;
2195 GstClockTime dts;
2196 GstClockTime duration;
2197 guint flags;
2198};
2199
2200static void
2201timestamp_free (Timestamp * ts)
2202{
2203 g_slice_free (Timestamp, ts);
2204}
2205
2206static void
2207gst_aml_video_decoder_add_buffer_info (GstAmlVideoDecoder * decoder,
2208 GstBuffer * buffer)
2209{
2210 GstAmlVideoDecoderPrivate *priv = decoder->priv;
2211 Timestamp *ts;
2212
2213 if (!GST_BUFFER_PTS_IS_VALID (buffer) &&
2214 !GST_BUFFER_DTS_IS_VALID (buffer) &&
2215 !GST_BUFFER_DURATION_IS_VALID (buffer) &&
2216 GST_BUFFER_FLAGS (buffer) == 0) {
2217 /* Save memory - don't bother storing info
2218 * for buffers with no distinguishing info */
2219 return;
2220 }
2221
2222 ts = g_slice_new (Timestamp);
2223
2224 GST_LOG_OBJECT (decoder,
2225 "adding PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT
2226 " (offset:%" G_GUINT64_FORMAT ")",
2227 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2228 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), priv->input_offset);
2229
2230 ts->offset = priv->input_offset;
2231 ts->pts = GST_BUFFER_PTS (buffer);
2232 ts->dts = GST_BUFFER_DTS (buffer);
2233 ts->duration = GST_BUFFER_DURATION (buffer);
2234 ts->flags = GST_BUFFER_FLAGS (buffer);
2235
2236 g_queue_push_tail (&priv->timestamps, ts);
2237
2238 if (g_queue_get_length (&priv->timestamps) > 40) {
2239 GST_WARNING_OBJECT (decoder,
2240 "decoder timestamp list getting long: %d timestamps,"
2241 "possible internal leaking?", g_queue_get_length (&priv->timestamps));
2242 }
2243}
2244
2245static void
2246gst_aml_video_decoder_get_buffer_info_at_offset (GstAmlVideoDecoder *
2247 decoder, guint64 offset, GstClockTime * pts, GstClockTime * dts,
2248 GstClockTime * duration, guint * flags)
2249{
2250#ifndef GST_DISABLE_GST_DEBUG
2251 guint64 got_offset = 0;
2252#endif
2253 Timestamp *ts;
2254 GList *g;
2255
2256 *pts = GST_CLOCK_TIME_NONE;
2257 *dts = GST_CLOCK_TIME_NONE;
2258 *duration = GST_CLOCK_TIME_NONE;
2259 *flags = 0;
2260
2261 g = decoder->priv->timestamps.head;
2262 while (g) {
2263 ts = g->data;
2264 if (ts->offset <= offset) {
2265 GList *next = g->next;
2266#ifndef GST_DISABLE_GST_DEBUG
2267 got_offset = ts->offset;
2268#endif
2269 *pts = ts->pts;
2270 *dts = ts->dts;
2271 *duration = ts->duration;
2272 *flags = ts->flags;
2273 g_queue_delete_link (&decoder->priv->timestamps, g);
2274 g = next;
2275 timestamp_free (ts);
2276 } else {
2277 break;
2278 }
2279 }
2280
2281 GST_LOG_OBJECT (decoder,
2282 "got PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT " flags %x @ offs %"
2283 G_GUINT64_FORMAT " (wanted offset:%" G_GUINT64_FORMAT ")",
2284 GST_TIME_ARGS (*pts), GST_TIME_ARGS (*dts), *flags, got_offset, offset);
2285}
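
/* Illustrative summary of the two helpers above, which implement an
 * offset-ordered timestamp queue for parse-mode (non-packetized) input:
 *
 *   on each input buffer B:
 *     push { offset = input_offset so far, PTS/DTS/duration/flags of B }
 *   on each parsed frame starting at byte `offset`:
 *     pop every entry with entry.offset <= offset; the last entry popped
 *     supplies the frame's PTS/DTS/duration/flags
 *
 * Consumed entries are freed as they are popped, so the queue only grows
 * when no frames get produced (see the warning in _add_buffer_info()).
 */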
2286
2287#if !GLIB_CHECK_VERSION(2, 60, 0)
2288#define g_queue_clear_full queue_clear_full
2289static void
2290queue_clear_full (GQueue * queue, GDestroyNotify free_func)
2291{
2292 gpointer data;
2293
2294 while ((data = g_queue_pop_head (queue)) != NULL)
2295 free_func (data);
2296}
2297#endif
2298
2299static void
2300gst_aml_video_decoder_clear_queues (GstAmlVideoDecoder * dec)
2301{
2302 GstAmlVideoDecoderPrivate *priv = dec->priv;
2303
2304 g_list_free_full (priv->output_queued,
2305 (GDestroyNotify) gst_mini_object_unref);
2306 priv->output_queued = NULL;
2307
2308 g_list_free_full (priv->gather, (GDestroyNotify) gst_mini_object_unref);
2309 priv->gather = NULL;
2310 g_list_free_full (priv->decode, (GDestroyNotify) gst_aml_video_codec_frame_unref);
2311 priv->decode = NULL;
2312 g_list_free_full (priv->parse, (GDestroyNotify) gst_mini_object_unref);
2313 priv->parse = NULL;
2314 g_list_free_full (priv->parse_gather,
2315 (GDestroyNotify) gst_aml_video_codec_frame_unref);
2316 priv->parse_gather = NULL;
2317 g_queue_clear_full (&priv->frames,
2318 (GDestroyNotify) gst_aml_video_codec_frame_unref);
2319}
2320
2321static void
2322gst_aml_video_decoder_reset (GstAmlVideoDecoder * decoder, gboolean full,
2323 gboolean flush_hard)
2324{
2325 GstAmlVideoDecoderPrivate *priv = decoder->priv;
2326
2327 GST_DEBUG_OBJECT (decoder, "reset full %d", full);
2328
2329 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
2330
2331 if (full || flush_hard) {
2332 gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
2333 gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
2334 gst_aml_video_decoder_clear_queues (decoder);
2335 decoder->priv->in_out_segment_sync = TRUE;
2336
2337 if (priv->current_frame) {
2338 gst_aml_video_codec_frame_unref (priv->current_frame);
2339 priv->current_frame = NULL;
2340 }
2341
2342 g_list_free_full (priv->current_frame_events,
2343 (GDestroyNotify) gst_event_unref);
2344 priv->current_frame_events = NULL;
2345 g_list_free_full (priv->pending_events, (GDestroyNotify) gst_event_unref);
2346 priv->pending_events = NULL;
2347
2348 priv->error_count = 0;
2349 priv->had_output_data = FALSE;
2350 priv->had_input_data = FALSE;
2351
2352 GST_OBJECT_LOCK (decoder);
2353 priv->earliest_time = GST_CLOCK_TIME_NONE;
2354 priv->proportion = 0.5;
2355 priv->decode_flags_override = FALSE;
2356
2357 priv->request_sync_point_flags = 0;
2358 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
2359 priv->last_force_key_unit_time = GST_CLOCK_TIME_NONE;
2360 GST_OBJECT_UNLOCK (decoder);
2361 priv->distance_from_sync = -1;
2362 }
2363
2364 if (full) {
2365 if (priv->input_state)
2366 gst_aml_video_codec_state_unref (priv->input_state);
2367 priv->input_state = NULL;
2368 GST_OBJECT_LOCK (decoder);
2369 if (priv->output_state)
2370 gst_aml_video_codec_state_unref (priv->output_state);
2371 priv->output_state = NULL;
2372
2373 priv->qos_frame_duration = 0;
2374 GST_OBJECT_UNLOCK (decoder);
2375
2376 if (priv->tags)
2377 gst_tag_list_unref (priv->tags);
2378 priv->tags = NULL;
2379 priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
2380 if (priv->upstream_tags) {
2381 gst_tag_list_unref (priv->upstream_tags);
2382 priv->upstream_tags = NULL;
2383 }
2384 priv->tags_changed = FALSE;
2385 priv->reordered_output = FALSE;
2386
2387 priv->dropped = 0;
2388 priv->processed = 0;
2389
2390 priv->posted_latency_msg = FALSE;
2391
2392 priv->decode_frame_number = 0;
2393 priv->base_picture_number = 0;
2394
2395 if (priv->pool) {
2396 GST_DEBUG_OBJECT (decoder, "deactivate pool %" GST_PTR_FORMAT,
2397 priv->pool);
2398 gst_buffer_pool_set_active (priv->pool, FALSE);
2399 gst_object_unref (priv->pool);
2400 priv->pool = NULL;
2401 }
2402
2403 if (priv->allocator) {
2404 gst_object_unref (priv->allocator);
2405 priv->allocator = NULL;
2406 }
2407 }
2408
2409 priv->discont = TRUE;
2410
2411 priv->base_timestamp = GST_CLOCK_TIME_NONE;
2412 priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
2413 priv->pts_delta = GST_CLOCK_TIME_NONE;
2414
2415 priv->input_offset = 0;
2416 priv->frame_offset = 0;
2417 gst_adapter_clear (priv->input_adapter);
2418 gst_adapter_clear (priv->output_adapter);
2419 g_queue_clear_full (&priv->timestamps, (GDestroyNotify) timestamp_free);
2420
2421 GST_OBJECT_LOCK (decoder);
2422 priv->bytes_out = 0;
2423 priv->time = 0;
2424 GST_OBJECT_UNLOCK (decoder);
2425
2426#ifndef GST_DISABLE_DEBUG
2427 priv->last_reset_time = gst_util_get_timestamp ();
2428#endif
2429
2430 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2431}
2432
2433static GstFlowReturn
2434gst_aml_video_decoder_chain_forward (GstAmlVideoDecoder * decoder,
2435 GstBuffer * buf, gboolean at_eos)
2436{
2437 GstAmlVideoDecoderPrivate *priv;
2438 GstAmlVideoDecoderClass *klass;
2439 GstFlowReturn ret = GST_FLOW_OK;
2440
2441 klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
2442 priv = decoder->priv;
2443
2444 g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);
2445
2446 /* Draining on DISCONT is handled in chain_reverse() for reverse playback,
2447 * where this function is only called to collect everything, GOP by GOP,
2448 * into the parse_gather list */
2449 if (decoder->input_segment.rate > 0.0 && GST_BUFFER_IS_DISCONT (buf)
2450 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2451 ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
2452
2453 if (priv->current_frame == NULL)
2454 priv->current_frame = gst_aml_video_decoder_new_frame (decoder);
2455
2456 if (!priv->packetized)
2457 gst_aml_video_decoder_add_buffer_info (decoder, buf);
2458
2459 priv->input_offset += gst_buffer_get_size (buf);
2460
2461 if (priv->packetized) {
2462 GstAmlVideoCodecFrame *frame;
2463 gboolean was_keyframe = FALSE;
2464
2465 frame = priv->current_frame;
2466
2467 frame->abidata.ABI.num_subframes++;
2468 if (gst_aml_video_decoder_get_subframe_mode (decoder)) {
2469 /* End the frame if the marker flag is set */
2470 if (!GST_BUFFER_FLAG_IS_SET (buf, GST_VIDEO_BUFFER_FLAG_MARKER)
2471 && (decoder->input_segment.rate > 0.0))
2472 priv->current_frame = gst_aml_video_codec_frame_ref (frame);
2473 else
2474 priv->current_frame = NULL;
2475 } else {
2476 priv->current_frame = frame;
2477 }
2478
2479 if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
2480 was_keyframe = TRUE;
2481 GST_DEBUG_OBJECT (decoder, "Marking current_frame as sync point");
2482 GST_AML_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
2483 }
2484
2485 if (frame->input_buffer) {
2486 gst_aml_video_decoder_copy_metas (decoder, frame, frame->input_buffer, buf);
2487 gst_buffer_unref (frame->input_buffer);
2488 }
2489 frame->input_buffer = buf;
2490
2491 if (decoder->input_segment.rate < 0.0) {
2492 priv->parse_gather = g_list_prepend (priv->parse_gather, frame);
2493 priv->current_frame = NULL;
2494 } else {
2495 ret = gst_aml_video_decoder_decode_frame (decoder, frame);
2496 if (!gst_aml_video_decoder_get_subframe_mode (decoder))
2497 priv->current_frame = NULL;
2498 }
2499 /* If in trick mode and it was a keyframe, drain the decoder to avoid
2500 * extra latency. Only do this for forward playback, as reverse playback
2501 * handles draining on keyframes in flush_parse() and would otherwise
2502 * call back from drain_out() to here, causing an infinite loop.
2503 * In reverse playback this function is only called to gather frames
2504 * GOP by GOP and does not do any actual decoding; that is done by
2505 * flush_decode() */
2506 if (ret == GST_FLOW_OK && was_keyframe && decoder->input_segment.rate > 0.0
2507 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2508 ret = gst_aml_video_decoder_drain_out (decoder, FALSE);
2509 } else {
2510 gst_adapter_push (priv->input_adapter, buf);
2511
2512 ret = gst_aml_video_decoder_parse_available (decoder, at_eos, TRUE);
2513 }
2514
2515 if (ret == GST_AML_VIDEO_DECODER_FLOW_NEED_DATA)
2516 return GST_FLOW_OK;
2517
2518 return ret;
2519}
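
/* Illustrative sketch (hypothetical subclass): in subframe mode, enabled
 * with gst_aml_video_decoder_set_subframe_mode(), a frame is only
 * completed by a buffer carrying the marker flag, e.g. one slice per
 * input buffer:
 *
 *   // upstream terminates each access unit with:
 *   GST_BUFFER_FLAG_SET (buf, GST_VIDEO_BUFFER_FLAG_MARKER);
 *
 * Until that flag is seen, chain_forward() above keeps handing the same
 * GstAmlVideoCodecFrame to the subclass with an updated input_buffer.
 */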
2520
2521static GstFlowReturn
2522gst_aml_video_decoder_flush_decode (GstAmlVideoDecoder * dec)
2523{
2524 GstAmlVideoDecoderPrivate *priv = dec->priv;
2525 GstFlowReturn res = GST_FLOW_OK;
2526 GList *walk;
2527 GstAmlVideoCodecFrame *current_frame = NULL;
2528 gboolean last_subframe;
2529 GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
2530
2531 walk = priv->decode;
2532 while (walk) {
2533 GList *next;
2534 GstAmlVideoCodecFrame *frame = (GstAmlVideoCodecFrame *) (walk->data);
2535 last_subframe = TRUE;
2536 /* In subframe mode, we need to get rid of the intermediary frames
2537 * created during the buffer gather stage. That's why we keep the current
2538 * frame as the main frame and drop all following frames until the end
2539 * of the subframe batch.
2540 */
2541 if (gst_aml_video_decoder_get_subframe_mode (dec)) {
2542 if (current_frame == NULL) {
2543 current_frame = gst_aml_video_codec_frame_ref (frame);
2544 } else {
2545 if (current_frame->input_buffer) {
2546 gst_aml_video_decoder_copy_metas (dec, current_frame,
2547 current_frame->input_buffer, current_frame->output_buffer);
2548 gst_buffer_unref (current_frame->input_buffer);
2549 }
2550 current_frame->input_buffer = gst_buffer_ref (frame->input_buffer);
2551 gst_aml_video_codec_frame_unref (frame);
2552 }
2553 last_subframe = GST_BUFFER_FLAG_IS_SET (current_frame->input_buffer,
2554 GST_VIDEO_BUFFER_FLAG_MARKER);
2555 } else {
2556 current_frame = frame;
2557 }
2558
2559 GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
2560 ", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
2561 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2562 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2563
2564 next = walk->next;
2565
2566 priv->decode = g_list_delete_link (priv->decode, walk);
2567
2568 /* decode buffer, resulting data prepended to queue */
2569 res = gst_aml_video_decoder_decode_frame (dec, current_frame);
2570 if (res != GST_FLOW_OK)
2571 break;
2572 if (!gst_aml_video_decoder_get_subframe_mode (dec)
2573 || last_subframe)
2574 current_frame = NULL;
2575 walk = next;
2576 }
2577
2578 return res;
2579}
2580
2581/* gst_aml_video_decoder_flush_parse is called from the
2582 * chain_reverse() function when a buffer carrying a
2583 * DISCONT arrives - indicating that reverse playback
2584 * looped back to the next data block, and that therefore
2585 * all available data should be fed through the
2586 * decoder and the frames gathered for reversed output
2587 */
2588static GstFlowReturn
2589gst_aml_video_decoder_flush_parse (GstAmlVideoDecoder * dec, gboolean at_eos)
2590{
2591 GstAmlVideoDecoderPrivate *priv = dec->priv;
2592 GstFlowReturn res = GST_FLOW_OK;
2593 GList *walk;
2594 GstAmlVideoDecoderClass *decoder_class;
2595
2596 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (dec);
2597
2598 GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");
2599
2600 /* Reverse the gather list, and prepend it to the parse list,
2601 * then flush to parse whatever we can */
2602 priv->gather = g_list_reverse (priv->gather);
2603 priv->parse = g_list_concat (priv->gather, priv->parse);
2604 priv->gather = NULL;
2605
2606 /* clear buffer and decoder state */
2607 gst_aml_video_decoder_flush (dec, FALSE);
2608
2609 walk = priv->parse;
2610 while (walk) {
2611 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2612 GList *next = walk->next;
2613
2614 GST_DEBUG_OBJECT (dec, "parsing buffer %p, PTS %" GST_TIME_FORMAT
2615 ", DTS %" GST_TIME_FORMAT " flags %x", buf,
2616 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2617 GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_BUFFER_FLAGS (buf));
2618
2619 /* parse buffer, resulting frames prepended to parse_gather queue */
2620 gst_buffer_ref (buf);
2621 res = gst_aml_video_decoder_chain_forward (dec, buf, at_eos);
2622
2623 /* if we generated output, we can discard the buffer, else we
2624 * keep it in the queue */
2625 if (priv->parse_gather) {
2626 GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
2627 priv->parse = g_list_delete_link (priv->parse, walk);
2628 gst_buffer_unref (buf);
2629 } else {
2630 GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
2631 }
2632 walk = next;
2633 }
2634
2635 walk = priv->parse_gather;
2636 while (walk) {
2637 GstAmlVideoCodecFrame *frame = (GstAmlVideoCodecFrame *) (walk->data);
2638 GList *walk2;
2639
2640 /* this is reverse playback, check if we need to apply some segment
2641 * to the output before decoding, as during decoding the segment.rate
2642 * must be used to determine if a buffer should be pushed or added to
2643 * the output list for reverse pushing.
2644 *
2645 * The new segment is not immediately pushed here because we must
2646 * wait for negotiation to happen before it can be pushed to avoid
2647 * pushing a segment before caps event. Negotiation only happens
2648 * when finish_frame is called.
2649 */
2650 for (walk2 = frame->events; walk2;) {
2651 GList *cur = walk2;
2652 GstEvent *event = walk2->data;
2653
2654 walk2 = g_list_next (walk2);
2655 if (GST_EVENT_TYPE (event) <= GST_EVENT_SEGMENT) {
2656
2657 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
2658 GstSegment segment;
2659
2660 GST_DEBUG_OBJECT (dec, "Segment at frame %p %" GST_TIME_FORMAT,
2661 frame, GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)));
2662 gst_event_copy_segment (event, &segment);
2663 if (segment.format == GST_FORMAT_TIME) {
2664 dec->output_segment = segment;
2665 dec->priv->in_out_segment_sync =
2666 gst_segment_is_equal (&dec->input_segment, &segment);
2667 }
2668 }
2669 dec->priv->pending_events =
2670 g_list_append (dec->priv->pending_events, event);
2671 frame->events = g_list_delete_link (frame->events, cur);
2672 }
2673 }
2674
2675 walk = walk->next;
2676 }
2677
2678 /* now we can process frames. Start by moving each frame from the
2679 * parse_gather to the decode list, reversing the order as we go, and
2680 * stopping when/if we copy a keyframe. */
2681 GST_DEBUG_OBJECT (dec, "checking parsed frames for a keyframe to decode");
2682 walk = priv->parse_gather;
2683 while (walk) {
2684 GstAmlVideoCodecFrame *frame = (GstAmlVideoCodecFrame *) (walk->data);
2685
2686 /* remove from the gather list */
2687 priv->parse_gather = g_list_remove_link (priv->parse_gather, walk);
2688
2689 /* move it to the front of the decode queue */
2690 priv->decode = g_list_concat (walk, priv->decode);
2691
2692 /* if we copied a keyframe, flush and decode the decode queue */
2693 if (GST_AML_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
2694 GST_DEBUG_OBJECT (dec, "found keyframe %p with PTS %" GST_TIME_FORMAT
2695 ", DTS %" GST_TIME_FORMAT, frame,
2696 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2697 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2698 res = gst_aml_video_decoder_flush_decode (dec);
2699 if (res != GST_FLOW_OK)
2700 goto done;
2701
2702 /* We need to tell the subclass to drain now.
2703 * We prefer the drain vfunc, but for backward-compat
2704 * we use a finish() vfunc if drain isn't implemented */
2705 if (decoder_class->drain) {
2706 GST_DEBUG_OBJECT (dec, "Draining");
2707 res = decoder_class->drain (dec);
2708 } else if (decoder_class->finish) {
2709 GST_FIXME_OBJECT (dec, "Sub-class should implement drain(). "
2710 "Calling finish() for backwards-compat");
2711 res = decoder_class->finish (dec);
2712 }
2713
2714 if (res != GST_FLOW_OK)
2715 goto done;
2716
2717 /* now send queued data downstream */
2718 walk = priv->output_queued;
2719 while (walk) {
2720 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2721
2722 priv->output_queued =
2723 g_list_delete_link (priv->output_queued, priv->output_queued);
2724
2725 if (G_LIKELY (res == GST_FLOW_OK)) {
2726 /* avoid stray DISCONT from forward processing,
2727 * which has no meaning in reverse pushing */
2728 GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
2729
2730 /* Last chance to calculate a timestamp as we loop backwards
2731 * through the list */
2732 if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE)
2733 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2734 else if (priv->last_timestamp_out != GST_CLOCK_TIME_NONE &&
2735 GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
2736 GST_BUFFER_TIMESTAMP (buf) =
2737 priv->last_timestamp_out - GST_BUFFER_DURATION (buf);
2738 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2739 GST_LOG_OBJECT (dec,
2740 "Calculated TS %" GST_TIME_FORMAT " working backwards",
2741 GST_TIME_ARGS (priv->last_timestamp_out));
2742 }
2743
2744 res = gst_aml_video_decoder_clip_and_push_buf (dec, buf);
2745 } else {
2746 gst_buffer_unref (buf);
2747 }
2748
2749 walk = priv->output_queued;
2750 }
2751
2752 /* clear buffer and decoder state again
2753 * before moving to the previous keyframe */
2754 gst_aml_video_decoder_flush (dec, FALSE);
2755 }
2756
2757 walk = priv->parse_gather;
2758 }
2759
2760done:
2761 return res;
2762}
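
/* Illustrative sketch of the reverse-playback flow implemented above,
 * with buffers arriving newest-GOP-first but in forward order inside
 * each GOP:
 *
 *   chain_reverse():  gather = [B2, B1, I1] (prepended as they arrive)
 *   on DISCONT:       parse  = reverse (gather) ++ parse
 *                     parse_gather = frames parsed from `parse`
 *                     frames are moved to `decode` until a keyframe is
 *                     found, flush_decode() runs them through the
 *                     subclass, and output_queued is finally pushed so
 *                     that timestamps decrease downstream.
 */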
2763
2764static GstFlowReturn
2765gst_aml_video_decoder_chain_reverse (GstAmlVideoDecoder * dec, GstBuffer * buf)
2766{
2767 GstAmlVideoDecoderPrivate *priv = dec->priv;
2768 GstFlowReturn result = GST_FLOW_OK;
2769
2770 /* if we have a discont, move buffers to the decode list */
2771 if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
2772 GST_DEBUG_OBJECT (dec, "received discont");
2773
2774 /* parse and decode stuff in the gather and parse queues */
2775 result = gst_aml_video_decoder_flush_parse (dec, FALSE);
2776 }
2777
2778 if (G_LIKELY (buf)) {
2779 GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2780 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
2781 GST_TIME_FORMAT, buf, gst_buffer_get_size (buf),
2782 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2783 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2784 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2785
2786 /* add buffer to gather queue */
2787 priv->gather = g_list_prepend (priv->gather, buf);
2788 }
2789
2790 return result;
2791}
2792
2793static GstFlowReturn
2794gst_aml_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
2795{
2796 GstAmlVideoDecoder *decoder;
2797 GstFlowReturn ret = GST_FLOW_OK;
2798
2799 decoder = GST_AML_VIDEO_DECODER (parent);
2800
2801 if (G_UNLIKELY (!decoder->priv->input_state && decoder->priv->needs_format))
2802 goto not_negotiated;
2803
2804 GST_LOG_OBJECT (decoder,
2805 "AML chain PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT " duration %"
2806 GST_TIME_FORMAT " size %" G_GSIZE_FORMAT " flags %x",
2807 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2808 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2809 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
2810 gst_buffer_get_size (buf), GST_BUFFER_FLAGS (buf));
2811
2812 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
2813
2814 /* NOTE:
2815 * requiring the pad to be negotiated makes it impossible to use
2816 * oggdemux or filesrc ! decoder */
2817
2818 if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
2819 GstEvent *event;
2820 GstSegment *segment = &decoder->input_segment;
2821
2822 GST_WARNING_OBJECT (decoder,
2823 "Received buffer without a new-segment. "
2824 "Assuming timestamps start from 0.");
2825
2826 gst_segment_init (segment, GST_FORMAT_TIME);
2827
2828 event = gst_event_new_segment (segment);
2829
2830 decoder->priv->current_frame_events =
2831 g_list_prepend (decoder->priv->current_frame_events, event);
2832 }
2833
2834 decoder->priv->had_input_data = TRUE;
2835
2836 if (decoder->input_segment.rate > 0.0)
2837 ret = gst_aml_video_decoder_chain_forward (decoder, buf, FALSE);
2838 else
2839 ret = gst_aml_video_decoder_chain_reverse (decoder, buf);
2840
2841 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2842 return ret;
2843
2844 /* ERRORS */
2845not_negotiated:
2846 {
2847 GST_ELEMENT_ERROR (decoder, CORE, NEGOTIATION, (NULL),
2848 ("decoder not initialized"));
2849 gst_buffer_unref (buf);
2850 return GST_FLOW_NOT_NEGOTIATED;
2851 }
2852}
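
/* Illustrative note: a non-TIME segment that cannot be converted is
 * swallowed in newseg_wrong_format above, so with e.g. (hypothetical
 * element name)
 *
 *   filesrc location=video.es ! amlvideodec ! ...
 *
 * the first buffer can reach _chain() with an UNDEFINED input segment;
 * the fallback above then assumes a TIME segment starting from 0.
 */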
2853
2854static GstStateChangeReturn
2855gst_aml_video_decoder_change_state (GstElement * element, GstStateChange transition)
2856{
2857 GstAmlVideoDecoder *decoder;
2858 GstAmlVideoDecoderClass *decoder_class;
2859 GstStateChangeReturn ret;
2860
2861 decoder = GST_AML_VIDEO_DECODER (element);
2862 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (element);
2863
2864 switch (transition) {
2865 case GST_STATE_CHANGE_NULL_TO_READY:
2866 /* open device/library if needed */
2867 if (decoder_class->open && !decoder_class->open (decoder))
2868 goto open_failed;
2869 break;
2870 case GST_STATE_CHANGE_READY_TO_PAUSED:
2871 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
2872 gst_aml_video_decoder_reset (decoder, TRUE, TRUE);
2873 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2874
2875 /* Initialize device/library if needed */
2876 if (decoder_class->start && !decoder_class->start (decoder))
2877 goto start_failed;
2878 break;
2879 default:
2880 break;
2881 }
2882
2883 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
2884
2885 switch (transition) {
2886 case GST_STATE_CHANGE_PAUSED_TO_READY:{
2887 gboolean stopped = TRUE;
2888
2889 if (decoder_class->stop)
2890 stopped = decoder_class->stop (decoder);
2891
2892 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
2893 gst_aml_video_decoder_reset (decoder, TRUE, TRUE);
2894 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2895
2896 if (!stopped)
2897 goto stop_failed;
2898
2899 break;
2900 }
2901 case GST_STATE_CHANGE_READY_TO_NULL:
2902 /* close device/library if needed */
2903 if (decoder_class->close && !decoder_class->close (decoder))
2904 goto close_failed;
2905 break;
2906 default:
2907 break;
2908 }
2909
2910 return ret;
2911
2912 /* Errors */
2913open_failed:
2914 {
2915 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2916 ("Failed to open decoder"));
2917 return GST_STATE_CHANGE_FAILURE;
2918 }
2919
2920start_failed:
2921 {
2922 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2923 ("Failed to start decoder"));
2924 return GST_STATE_CHANGE_FAILURE;
2925 }
2926
2927stop_failed:
2928 {
2929 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2930 ("Failed to stop decoder"));
2931 return GST_STATE_CHANGE_FAILURE;
2932 }
2933
2934close_failed:
2935 {
2936 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2937 ("Failed to close decoder"));
2938 return GST_STATE_CHANGE_FAILURE;
2939 }
2940}
2941
2942static GstAmlVideoCodecFrame *
2943gst_aml_video_decoder_new_frame (GstAmlVideoDecoder * decoder)
2944{
2945 GstAmlVideoDecoderPrivate *priv = decoder->priv;
2946 GstAmlVideoCodecFrame *frame;
2947
2948 frame = g_slice_new0 (GstAmlVideoCodecFrame);
2949
2950 frame->ref_count = 1;
2951
2952 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
2953 frame->system_frame_number = priv->system_frame_number;
2954 priv->system_frame_number++;
2955 frame->decode_frame_number = priv->decode_frame_number;
2956 priv->decode_frame_number++;
2957
2958 frame->dts = GST_CLOCK_TIME_NONE;
2959 frame->pts = GST_CLOCK_TIME_NONE;
2960 frame->duration = GST_CLOCK_TIME_NONE;
2961 frame->events = priv->current_frame_events;
2962 priv->current_frame_events = NULL;
2963
2964 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2965
2966 GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
2967 frame, frame->system_frame_number);
2968
2969 return frame;
2970}
2971
2972static void
2973gst_aml_video_decoder_push_event_list (GstAmlVideoDecoder * decoder, GList * events)
2974{
2975 GList *l;
2976
2977 /* events are stored in reverse order */
2978 for (l = g_list_last (events); l; l = g_list_previous (l)) {
2979 GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
2980 gst_aml_video_decoder_push_event (decoder, l->data);
2981 }
2982 g_list_free (events);
2983}
2984
2985static void
2986gst_aml_video_decoder_prepare_finish_frame (GstAmlVideoDecoder *
2987 decoder, GstAmlVideoCodecFrame * frame, gboolean dropping)
2988{
2989 GstAmlVideoDecoderPrivate *priv = decoder->priv;
2990 GList *l, *events = NULL;
2991 gboolean sync;
2992
2993#ifndef GST_DISABLE_GST_DEBUG
2994 GST_LOG_OBJECT (decoder, "n %d in %" G_GSIZE_FORMAT " out %" G_GSIZE_FORMAT,
2995 priv->frames.length,
2996 gst_adapter_available (priv->input_adapter),
2997 gst_adapter_available (priv->output_adapter));
2998#endif
2999
3000 sync = GST_AML_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);
3001
3002 GST_LOG_OBJECT (decoder,
3003 "finish frame %p (#%d)(sub=#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
3004 GST_TIME_FORMAT,
3005 frame, frame->system_frame_number, frame->abidata.ABI.num_subframes,
3006 sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));
3007
3008 /* Push all pending events that arrived before this frame */
3009 for (l = priv->frames.head; l; l = l->next) {
3010 GstAmlVideoCodecFrame *tmp = l->data;
3011
3012 if (tmp->events) {
3013 events = g_list_concat (tmp->events, events);
3014 tmp->events = NULL;
3015 }
3016
3017 if (tmp == frame)
3018 break;
3019 }
3020
3021 if (dropping || !decoder->priv->output_state) {
3022 /* Push before the next frame that is not dropped */
3023 decoder->priv->pending_events =
3024 g_list_concat (events, decoder->priv->pending_events);
3025 } else {
3026 gst_aml_video_decoder_push_event_list (decoder, decoder->priv->pending_events);
3027 decoder->priv->pending_events = NULL;
3028
3029 gst_aml_video_decoder_push_event_list (decoder, events);
3030 }
3031
3032 /* Check if the data should not be displayed, e.g. an altref/invisible
3033 * frame in VP8. In this case we should not update the timestamps. */
3034 if (GST_AML_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
3035 return;
3036
3037 /* If the frame is meant to be output but we don't have an output_buffer
3038 * we have a problem :) */
3039 if (G_UNLIKELY ((frame->output_buffer == NULL) && !dropping))
3040 goto no_output_buffer;
3041
3042 if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
3043 if (frame->pts != priv->base_timestamp) {
3044 GST_DEBUG_OBJECT (decoder,
3045 "sync timestamp %" GST_TIME_FORMAT " diff %" GST_STIME_FORMAT,
3046 GST_TIME_ARGS (frame->pts),
3047 GST_STIME_ARGS (GST_CLOCK_DIFF (frame->pts,
3048 decoder->output_segment.start)));
3049 priv->base_timestamp = frame->pts;
3050 priv->base_picture_number = frame->decode_frame_number;
3051 }
3052 }
3053
3054 if (frame->duration == GST_CLOCK_TIME_NONE) {
3055 frame->duration = gst_aml_video_decoder_get_frame_duration (decoder, frame);
3056 GST_LOG_OBJECT (decoder,
3057 "Guessing duration %" GST_TIME_FORMAT " for frame...",
3058 GST_TIME_ARGS (frame->duration));
3059 }
3060
3061 /* PTS is expected to be monotonically ascending,
3062 * so a good guess is the lowest unsent DTS */
3063 {
3064 GstClockTime min_ts = GST_CLOCK_TIME_NONE;
3065 GstAmlVideoCodecFrame *oframe = NULL;
3066 gboolean seen_none = FALSE;
3067
3068 /* some maintenance regardless */
3069 for (l = priv->frames.head; l; l = l->next) {
3070 GstAmlVideoCodecFrame *tmp = l->data;
3071
3072 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
3073 seen_none = TRUE;
3074 continue;
3075 }
3076
3077 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
3078 min_ts = tmp->abidata.ABI.ts;
3079 oframe = tmp;
3080 }
3081 }
3082 /* save a ts if needed */
3083 if (oframe && oframe != frame) {
3084 oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
3085 }
3086
3087 /* and set if needed;
3088 * valid delta means we have reasonable DTS input */
3089 /* also, if we ended up reordered, it means this approach conflicts
3090 * with some sparse existing PTS, and so it does not work out */
3091 if (!priv->reordered_output &&
3092 !GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none &&
3093 GST_CLOCK_TIME_IS_VALID (priv->pts_delta)) {
3094 frame->pts = min_ts + priv->pts_delta;
3095 GST_DEBUG_OBJECT (decoder,
3096 "no valid PTS, using oldest DTS %" GST_TIME_FORMAT,
3097 GST_TIME_ARGS (frame->pts));
3098 }
3099
3100 /* some more maintenance, ts2 holds PTS */
3101 min_ts = GST_CLOCK_TIME_NONE;
3102 seen_none = FALSE;
3103 for (l = priv->frames.head; l; l = l->next) {
3104 GstAmlVideoCodecFrame *tmp = l->data;
3105
3106 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts2)) {
3107 seen_none = TRUE;
3108 continue;
3109 }
3110
3111 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts2 < min_ts) {
3112 min_ts = tmp->abidata.ABI.ts2;
3113 oframe = tmp;
3114 }
3115 }
3116 /* save a ts if needed */
3117 if (oframe && oframe != frame) {
3118 oframe->abidata.ABI.ts2 = frame->abidata.ABI.ts2;
3119 }
3120
3121 /* if we detected reordered output, then the PTS are void however
3122 * they were obtained: bogus input, subclass behaviour, etc. */
3123 if (priv->reordered_output && !seen_none) {
3124 GST_DEBUG_OBJECT (decoder, "invalidating PTS");
3125 frame->pts = GST_CLOCK_TIME_NONE;
3126 }
3127
3128 if (!GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none) {
3129 frame->pts = min_ts;
3130 GST_DEBUG_OBJECT (decoder,
3131 "no valid PTS, using oldest PTS %" GST_TIME_FORMAT,
3132 GST_TIME_ARGS (frame->pts));
3133 }
3134 }
3135
3136
3137 if (frame->pts == GST_CLOCK_TIME_NONE) {
3138 /* Last ditch timestamp guess: Just add the duration to the previous
3139 * frame. If it's the first frame, just use the segment start. */
3140 if (frame->duration != GST_CLOCK_TIME_NONE) {
3141 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out))
3142 frame->pts = priv->last_timestamp_out + frame->duration;
3143 else if (frame->dts != GST_CLOCK_TIME_NONE) {
3144 frame->pts = frame->dts;
3145 GST_LOG_OBJECT (decoder,
3146 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
3147 GST_TIME_ARGS (frame->pts));
3148 } else if (decoder->output_segment.rate > 0.0)
3149 frame->pts = decoder->output_segment.start;
3150 GST_INFO_OBJECT (decoder,
3151 "Guessing PTS=%" GST_TIME_FORMAT " for frame... DTS=%"
3152 GST_TIME_FORMAT, GST_TIME_ARGS (frame->pts),
3153 GST_TIME_ARGS (frame->dts));
3154 } else if (sync && frame->dts != GST_CLOCK_TIME_NONE) {
3155 frame->pts = frame->dts;
3156 GST_LOG_OBJECT (decoder,
3157 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
3158 GST_TIME_ARGS (frame->pts));
3159 }
3160 }
3161
3162 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
3163 if (frame->pts < priv->last_timestamp_out) {
3164 GST_WARNING_OBJECT (decoder,
3165 "decreasing timestamp (%" GST_TIME_FORMAT " < %"
3166 GST_TIME_FORMAT ")",
3167 GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
3168 priv->reordered_output = TRUE;
3169 /* make it a bit less weird downstream */
3170 frame->pts = priv->last_timestamp_out;
3171 }
3172 }
3173
3174 if (GST_CLOCK_TIME_IS_VALID (frame->pts))
3175 priv->last_timestamp_out = frame->pts;
3176
3177 return;
3178
3179 /* ERRORS */
3180no_output_buffer:
3181 {
3182 GST_ERROR_OBJECT (decoder, "No buffer to output!");
3183 }
3184}
3185
3186/**
3187 * gst_aml_video_decoder_release_frame:
3188 * @dec: a #GstAmlVideoDecoder
3189 * @frame: (transfer full): the #GstAmlVideoCodecFrame to release
3190 *
3191 * Similar to gst_aml_video_decoder_drop_frame(), but simply releases @frame
3192 * without any processing other than removing it from the list of pending frames,
3193 * after which it is considered finished and released.
3194 *
3195 * Since: 1.2.2
3196 */
3197void
3198gst_aml_video_decoder_release_frame (GstAmlVideoDecoder * dec,
3199 GstAmlVideoCodecFrame * frame)
3200{
3201 GList *link;
3202
3203 /* unref once from the list */
3204 GST_AML_VIDEO_DECODER_STREAM_LOCK (dec);
3205 link = g_queue_find (&dec->priv->frames, frame);
3206 if (link) {
3207 gst_aml_video_codec_frame_unref (frame);
3208 g_queue_delete_link (&dec->priv->frames, link);
3209 }
3210 if (frame->events) {
3211 dec->priv->pending_events =
3212 g_list_concat (frame->events, dec->priv->pending_events);
3213 frame->events = NULL;
3214 }
3215 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (dec);
3216
3217 /* unref because this function takes ownership */
3218 gst_aml_video_codec_frame_unref (frame);
3219}
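
/* Illustrative sketch (hypothetical subclass, hw_* names made up): a
 * frame the hardware consumed without producing output can be released
 * from handle_frame() as:
 *
 *   if (hw_consumed_without_output (self->hw, frame)) {
 *     gst_aml_video_decoder_release_frame (dec, frame); // takes ownership
 *     return GST_FLOW_OK;
 *   }
 *
 * Unlike _drop_frame() below, no QoS message is posted and the frame's
 * pending events are kept for the next frame.
 */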
3220
3221/* called with STREAM_LOCK */
3222static void
3223gst_aml_video_decoder_post_qos_drop (GstAmlVideoDecoder * dec, GstClockTime timestamp)
3224{
3225 GstClockTime stream_time, jitter, earliest_time, qostime;
3226 GstSegment *segment;
3227 GstMessage *qos_msg;
3228 gdouble proportion;
3229 dec->priv->dropped++;
3230
3231 /* post QoS message */
3232 GST_OBJECT_LOCK (dec);
3233 proportion = dec->priv->proportion;
3234 earliest_time = dec->priv->earliest_time;
3235 GST_OBJECT_UNLOCK (dec);
3236
3237 segment = &dec->output_segment;
3238 if (G_UNLIKELY (segment->format == GST_FORMAT_UNDEFINED))
3239 segment = &dec->input_segment;
3240 stream_time =
3241 gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
3242 qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
3243 jitter = GST_CLOCK_DIFF (qostime, earliest_time);
3244 qos_msg =
3245 gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
3246 timestamp, GST_CLOCK_TIME_NONE);
3247 gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
3248 gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
3249 dec->priv->processed, dec->priv->dropped);
3250 gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
3251}
3252
3253/**
3254 * gst_aml_video_decoder_drop_frame:
3255 * @dec: a #GstAmlVideoDecoder
3256 * @frame: (transfer full): the #GstAmlVideoCodecFrame to drop
3257 *
3258 * Similar to gst_aml_video_decoder_finish_frame(), but drops @frame and
3259 * posts a QoS message with the frame's details on the bus.
3260 * In any case, the frame is considered finished and released.
3261 *
3262 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3263 */
3264GstFlowReturn
3265gst_aml_video_decoder_drop_frame (GstAmlVideoDecoder * dec, GstAmlVideoCodecFrame * frame)
3266{
3267 GST_LOG_OBJECT (dec, "drop frame %p", frame);
3268
3269 if (gst_aml_video_decoder_get_subframe_mode (dec))
3270 GST_DEBUG_OBJECT (dec, "Drop subframe %d. Must be the last one.",
3271 frame->abidata.ABI.num_subframes);
3272
3273 GST_AML_VIDEO_DECODER_STREAM_LOCK (dec);
3274
3275 gst_aml_video_decoder_prepare_finish_frame (dec, frame, TRUE);
3276
3277 GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
3278 GST_TIME_ARGS (frame->pts));
3279
3280 gst_aml_video_decoder_post_qos_drop (dec, frame->pts);
3281
3282 /* now free the frame */
3283 gst_aml_video_decoder_release_frame (dec, frame);
3284
3285 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (dec);
3286
3287 return GST_FLOW_OK;
3288}
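
/* Illustrative sketch (assuming this base class mirrors GstVideoDecoder's
 * gst_video_decoder_get_max_decode_time() helper): dropping a frame that
 * is already too late, from handle_frame():
 *
 *   GstClockTimeDiff deadline =
 *       gst_aml_video_decoder_get_max_decode_time (dec, frame);
 *   if (deadline < 0) // negative: decoding it would be useless
 *     return gst_aml_video_decoder_drop_frame (dec, frame);
 *
 * The drop is then reported through the QoS message built in
 * _post_qos_drop() above.
 */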
3289
3290/**
3291 * gst_aml_video_decoder_drop_subframe:
3292 * @dec: a #GstAmlVideoDecoder
3293 * @frame: (transfer full): the #GstAmlVideoCodecFrame
3294 *
3295 * Drops input data.
3296 * The frame is not considered finished until the whole frame
3297 * is finished or dropped by the subclass.
3298 *
3299 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3300 *
3301 * Since: 1.20
3302 */
3303GstFlowReturn
3304gst_aml_video_decoder_drop_subframe (GstAmlVideoDecoder * dec,
3305 GstAmlVideoCodecFrame * frame)
3306{
3307 g_return_val_if_fail (gst_aml_video_decoder_get_subframe_mode (dec),
3308 GST_FLOW_NOT_SUPPORTED);
3309
3310 GST_LOG_OBJECT (dec, "drop subframe %p num=%d", frame->input_buffer,
3311 gst_aml_video_decoder_get_input_subframe_index (dec, frame));
3312
3313 GST_AML_VIDEO_DECODER_STREAM_LOCK (dec);
3314
3315 gst_aml_video_codec_frame_unref (frame);
3316
3317 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (dec);
3318
3319 return GST_FLOW_OK;
3320}
3321
3322static gboolean
3323gst_aml_video_decoder_transform_meta_default (GstAmlVideoDecoder *
3324 decoder, GstAmlVideoCodecFrame * frame, GstMeta * meta)
3325{
3326 const GstMetaInfo *info = meta->info;
3327 const gchar *const *tags;
3328 const gchar *const supported_tags[] = {
3329 GST_META_TAG_VIDEO_STR,
3330 GST_META_TAG_VIDEO_ORIENTATION_STR,
3331 GST_META_TAG_VIDEO_SIZE_STR,
3332 NULL,
3333 };
3334
3335 tags = gst_meta_api_type_get_tags (info->api);
3336
3337 if (!tags)
3338 return TRUE;
3339
3340 while (*tags) {
3341 if (!g_strv_contains (supported_tags, *tags))
3342 return FALSE;
3343 tags++;
3344 }
3345
3346 return TRUE;
3347}
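
/* Illustrative sketch (hypothetical subclass, MY_CUSTOM_META_API_TYPE is
 * made up): extending the default tag policy above to also copy a custom
 * meta onto output buffers:
 *
 *   static gboolean
 *   my_dec_transform_meta (GstAmlVideoDecoder * dec,
 *       GstAmlVideoCodecFrame * frame, GstMeta * meta)
 *   {
 *     if (meta->info->api == MY_CUSTOM_META_API_TYPE)
 *       return TRUE;
 *     return GST_AML_VIDEO_DECODER_CLASS (parent_class)->transform_meta
 *         (dec, frame, meta);
 *   }
 *
 * Returning TRUE makes foreach_metadata() below copy the meta.
 */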
3348
3349typedef struct
3350{
3351 GstAmlVideoDecoder *decoder;
3352 GstAmlVideoCodecFrame *frame;
3353 GstBuffer *buffer;
3354} CopyMetaData;
3355
3356static gboolean
3357foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
3358{
3359 CopyMetaData *data = user_data;
3360 GstAmlVideoDecoder *decoder = data->decoder;
3361 GstAmlVideoDecoderClass *klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
3362 GstAmlVideoCodecFrame *frame = data->frame;
3363 GstBuffer *buffer = data->buffer;
3364 const GstMetaInfo *info = (*meta)->info;
3365 gboolean do_copy = FALSE;
3366
3367 if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)
3368 || gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory_reference)) {
3369 /* never call the transform_meta with memory specific metadata */
3370 GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
3371 g_type_name (info->api));
3372 do_copy = FALSE;
3373 } else if (klass->transform_meta) {
3374 do_copy = klass->transform_meta (decoder, frame, *meta);
3375 GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
3376 g_type_name (info->api), do_copy);
3377 }
3378
3379 /* we only copy metadata when the subclass implemented a transform_meta
3380 * function and when it returns %TRUE */
3381 if (do_copy && info->transform_func) {
3382 GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
3383 GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
3384 /* simply copy then */
3385
3386 info->transform_func (buffer, *meta, inbuf, _gst_meta_transform_copy,
3387 &copy_data);
3388 }
3389 return TRUE;
3390}
3391
3392static void
3393gst_aml_video_decoder_copy_metas (GstAmlVideoDecoder * decoder,
3394 GstAmlVideoCodecFrame * frame, GstBuffer * src_buffer, GstBuffer * dest_buffer)
3395{
3396 GstAmlVideoDecoderClass *decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
3397
3398 if (decoder_class->transform_meta) {
3399 if (G_LIKELY (frame)) {
3400 CopyMetaData data;
3401
3402 data.decoder = decoder;
3403 data.frame = frame;
3404 data.buffer = dest_buffer;
3405 gst_buffer_foreach_meta (src_buffer, foreach_metadata, &data);
3406 } else {
3407 GST_WARNING_OBJECT (decoder,
3408 "Can't copy metadata because input frame disappeared");
3409 }
3410 }
3411}
3412
3413/**
3414 * gst_aml_video_decoder_finish_frame:
3415 * @decoder: a #GstAmlVideoDecoder
3416 * @frame: (transfer full): a decoded #GstAmlVideoCodecFrame
3417 *
3418 * @frame should have a valid decoded data buffer, whose metadata fields
3419 * are then appropriately set according to frame data and pushed downstream.
3420 * If no output data is provided, @frame is considered skipped.
3421 * In any case, the frame is considered finished and released.
3422 *
3423 * After calling this function the output buffer of the frame is to be
3424 * considered read-only. This function will also change the metadata
3425 * of the buffer.
3426 *
3427 * Returns: a #GstFlowReturn resulting from sending data downstream
3428 */
3429GstFlowReturn
3430gst_aml_video_decoder_finish_frame (GstAmlVideoDecoder * decoder,
3431 GstAmlVideoCodecFrame * frame)
3432{
3433 GstFlowReturn ret = GST_FLOW_OK;
3434 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3435 GstBuffer *output_buffer;
3436 gboolean needs_reconfigure = FALSE;
3437
3438 GST_LOG_OBJECT (decoder, "finish frame %p", frame);
3439
3440 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3441
3442 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
3443 if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
3444 && needs_reconfigure))) {
3445 if (!gst_aml_video_decoder_negotiate_unlocked (decoder)) {
3446 gst_pad_mark_reconfigure (decoder->srcpad);
3447 if (GST_PAD_IS_FLUSHING (decoder->srcpad))
3448 ret = GST_FLOW_FLUSHING;
3449 else
3450 ret = GST_FLOW_NOT_NEGOTIATED;
3451 goto done;
3452 }
3453 }
3454
3455 gst_aml_video_decoder_prepare_finish_frame (decoder, frame, FALSE);
3456 priv->processed++;
3457
3458 if (priv->tags_changed) {
3459 GstEvent *tags_event;
3460
3461 tags_event = gst_aml_video_decoder_create_merged_tags_event (decoder);
3462
3463 if (tags_event != NULL)
3464 gst_aml_video_decoder_push_event (decoder, tags_event);
3465
3466 priv->tags_changed = FALSE;
3467 }
3468
3469 /* no buffer data means this frame is skipped */
3470 if (!frame->output_buffer || GST_AML_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
3471 GST_DEBUG_OBJECT (decoder,
3472 "skipping frame %" GST_TIME_FORMAT " because no output was produced",
3473 GST_TIME_ARGS (frame->pts));
3474 goto done;
3475 }
3476
3477 /* Mark output as corrupted if the subclass requested so and we're either
3478 * still before the sync point after the request, or we don't even know the
3479 * frame number of the sync point yet (it is 0) */
3480 GST_OBJECT_LOCK (decoder);
3481 if (frame->system_frame_number <= priv->request_sync_point_frame_number
3482 && priv->request_sync_point_frame_number != REQUEST_SYNC_POINT_UNSET) {
3483 if (priv->request_sync_point_flags &
3484 GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT) {
3485 GST_DEBUG_OBJECT (decoder,
3486 "marking frame %" GST_TIME_FORMAT
3487 " as corrupted because it is still before the sync point",
3488 GST_TIME_ARGS (frame->pts));
3489 GST_AML_VIDEO_CODEC_FRAME_FLAG_SET (frame,
3490 GST_AML_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
3491 }
3492 } else {
3493 /* Reset to -1 to mark it as unset now that we've reached the frame */
3494 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
3495 }
3496 GST_OBJECT_UNLOCK (decoder);
3497
3498 if (priv->discard_corrupted_frames
3499 && (GST_AML_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3500 GST_AML_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)
3501 || GST_BUFFER_FLAG_IS_SET (frame->output_buffer,
3502 GST_BUFFER_FLAG_CORRUPTED))) {
3503 GST_DEBUG_OBJECT (decoder,
3504 "skipping frame %" GST_TIME_FORMAT " because it is corrupted",
3505 GST_TIME_ARGS (frame->pts));
3506 goto done;
3507 }
3508
3509 /* We need a writable buffer for the metadata changes below */
3510 output_buffer = frame->output_buffer =
3511 gst_buffer_make_writable (frame->output_buffer);
3512
3513 GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
3514
3515 GST_BUFFER_PTS (output_buffer) = frame->pts;
3516 GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
3517 GST_BUFFER_DURATION (output_buffer) = frame->duration;
3518
3519 GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
3520 GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;
3521
3522 if (priv->discont) {
3523 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
3524 }
3525
3526 if (GST_AML_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3527 GST_AML_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)) {
3528 GST_DEBUG_OBJECT (decoder,
3529 "marking frame %" GST_TIME_FORMAT " as corrupted",
3530 GST_TIME_ARGS (frame->pts));
3531 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_CORRUPTED);
3532 }
3533
3534 gst_aml_video_decoder_copy_metas (decoder, frame, frame->input_buffer,
3535 frame->output_buffer);
3536
3537 /* Get an additional ref to the buffer, which is going to be pushed
3538 * downstream, the original ref is owned by the frame
3539 */
3540 output_buffer = gst_buffer_ref (output_buffer);
3541
3542 /* Release frame so the buffer is writable when we push it downstream
3543 * if possible, i.e. if the subclass does not hold additional references
3544 * to the frame
3545 */
3546 gst_aml_video_decoder_release_frame (decoder, frame);
3547 frame = NULL;
3548
3549 if (decoder->output_segment.rate < 0.0
3550 && !(decoder->output_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)) {
3551 GST_LOG_OBJECT (decoder, "queued frame");
3552 priv->output_queued = g_list_prepend (priv->output_queued, output_buffer);
3553 } else {
3554 ret = gst_aml_video_decoder_clip_and_push_buf (decoder, output_buffer);
3555 }
3556
3557done:
3558 if (frame)
3559 gst_aml_video_decoder_release_frame (decoder, frame);
3560 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3561 return ret;
3562}
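
/* Usage sketch (hypothetical subclass code): a typical synchronous
 * handle_frame() implementation built around finish_frame().
 * my_dec_decode_into() is an assumed helper that writes decoded pixels into
 * the mapped output buffer.
 *
 *   static GstFlowReturn
 *   my_dec_handle_frame (GstAmlVideoDecoder * dec, GstAmlVideoCodecFrame * frame)
 *   {
 *     GstMapInfo map;
 *     GstFlowReturn ret;
 *
 *     ret = gst_aml_video_decoder_allocate_output_frame (dec, frame);
 *     if (ret != GST_FLOW_OK) {
 *       gst_aml_video_decoder_release_frame (dec, frame);
 *       return ret;
 *     }
 *
 *     gst_buffer_map (frame->output_buffer, &map, GST_MAP_WRITE);
 *     my_dec_decode_into (dec, frame->input_buffer, map.data, map.size);
 *     gst_buffer_unmap (frame->output_buffer, &map);
 *
 *     return gst_aml_video_decoder_finish_frame (dec, frame);
 *   }
 */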
3563
3564/**
3565 * gst_aml_video_decoder_finish_subframe:
3566 * @decoder: a #GstAmlVideoDecoder
3567 * @frame: (transfer full): the #GstAmlVideoCodecFrame
3568 *
3569 * Indicates that a subframe has been fully decoded
3570 * by the subclass. This method should be called for all subframes
3571 * except the last subframe where @gst_aml_video_decoder_finish_frame
3572 * should be called instead.
3573 *
3574 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3575 *
3576 * Since: 1.20
3577 */
3578GstFlowReturn
3579gst_aml_video_decoder_finish_subframe (GstAmlVideoDecoder * decoder,
3580 GstAmlVideoCodecFrame * frame)
3581{
3582 g_return_val_if_fail (gst_aml_video_decoder_get_subframe_mode (decoder),
3583 GST_FLOW_NOT_SUPPORTED);
3584
3585 GST_LOG_OBJECT (decoder, "finish subframe %p num=%d", frame->input_buffer,
3586 gst_aml_video_decoder_get_input_subframe_index (decoder, frame));
3587
3588 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3589 frame->abidata.ABI.subframes_processed++;
3590 gst_aml_video_codec_frame_unref (frame);
3591
3592 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3593
3594 return GST_FLOW_OK;
3595}
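
/* Usage sketch (hypothetical subclass code): handle_frame() in subframe mode,
 * where each input buffer carries one slice and GST_VIDEO_BUFFER_FLAG_MARKER
 * flags the last slice of a frame. my_dec_decode_slice() is an assumed helper.
 *
 *   static GstFlowReturn
 *   my_dec_handle_frame (GstAmlVideoDecoder * dec, GstAmlVideoCodecFrame * frame)
 *   {
 *     gboolean last = GST_BUFFER_FLAG_IS_SET (frame->input_buffer,
 *         GST_VIDEO_BUFFER_FLAG_MARKER);
 *
 *     my_dec_decode_slice (dec, frame);
 *
 *     if (!last)
 *       return gst_aml_video_decoder_finish_subframe (dec, frame);
 *
 *     gst_aml_video_decoder_have_last_subframe (dec, frame);
 *     return gst_aml_video_decoder_finish_frame (dec, frame);
 *   }
 */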
3596
3597/* With stream lock, takes the frame reference */
3598static GstFlowReturn
3599gst_aml_video_decoder_clip_and_push_buf (GstAmlVideoDecoder * decoder, GstBuffer * buf)
3600{
3601 GstFlowReturn ret = GST_FLOW_OK;
3602 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3603 guint64 start, stop;
3604 guint64 cstart, cstop;
3605 GstSegment *segment;
3606 GstClockTime duration;
3607
3608 /* Check for clipping */
3609 start = GST_BUFFER_PTS (buf);
3610 duration = GST_BUFFER_DURATION (buf);
3611
3612 /* store that we have valid decoded data */
3613 priv->had_output_data = TRUE;
3614
3615 stop = GST_CLOCK_TIME_NONE;
3616
3617 if (GST_CLOCK_TIME_IS_VALID (start) && GST_CLOCK_TIME_IS_VALID (duration)) {
3618 stop = start + duration;
3619 } else if (GST_CLOCK_TIME_IS_VALID (start)
3620 && !GST_CLOCK_TIME_IS_VALID (duration)) {
3621 /* If we don't clip away buffers that far before the segment we
3622 * can cause the pipeline to lock up. This can happen if audio is
3623 * properly clipped, and thus the audio sink does not preroll yet
3624 * but the video sink prerolls because we already output a
3625 * buffer here... and then queues run full.
3626 *
3627 * In the worst case we will clip one buffer too many here now if no
3628 * framerate is given, no buffer duration is given and the actual
3629 * framerate is lower than 25fps */
3630 stop = start + 40 * GST_MSECOND;
3631 }
3632
3633 segment = &decoder->output_segment;
3634 if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
3635 if (!GST_CLOCK_TIME_IS_VALID (start)) {
3636 GST_BUFFER_PTS (buf) = cstart;
3637 }
3638
3639 if (stop != GST_CLOCK_TIME_NONE && GST_CLOCK_TIME_IS_VALID (duration))
3640 GST_BUFFER_DURATION (buf) = cstop - cstart;
3641
3642 GST_LOG_OBJECT (decoder,
3643 "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
3644 GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3645 " time %" GST_TIME_FORMAT,
3646 GST_TIME_ARGS (cstart),
3647 GST_TIME_ARGS (cstop),
3648 GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
3649 GST_TIME_ARGS (segment->time));
3650 } else {
3651 GST_LOG_OBJECT (decoder,
3652 "dropping buffer outside segment: %" GST_TIME_FORMAT
3653 " %" GST_TIME_FORMAT
3654 " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3655 " time %" GST_TIME_FORMAT,
3656 GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
3657 GST_TIME_ARGS (segment->start),
3658 GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
3659 /* only check and return EOS if upstream still
3660 * in the same segment and interested as such */
3661 if (decoder->priv->in_out_segment_sync) {
3662 if (segment->rate >= 0) {
3663 if (GST_BUFFER_PTS (buf) >= segment->stop)
3664 ret = GST_FLOW_EOS;
3665 } else if (GST_BUFFER_PTS (buf) < segment->start) {
3666 ret = GST_FLOW_EOS;
3667 }
3668 }
3669
3670 const char *env = getenv("GST_AML_VIDEO_DECODER_DO_CLIP");
3671 if (env && 0 == atoi(env)) {
3672 GST_DEBUG_OBJECT (decoder, "not clipping out-of-bounds buffer, sending anyway");
3673 /* release STREAM_LOCK not to block upstream
3674 * while pushing buffer downstream */
3675 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3676 ret = gst_pad_push (decoder->srcpad, buf);
3677 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3678 }
3679 else
3680 {
3681 gst_buffer_unref (buf);
3682 }
3683 goto done;
3684 }
3685
3686 /* Is buffer too late (QoS) ? */
3687 if (priv->do_qos && GST_CLOCK_TIME_IS_VALID (priv->earliest_time)
3688 && GST_CLOCK_TIME_IS_VALID (cstart)) {
3689 GstClockTime deadline =
3690 gst_segment_to_running_time (segment, GST_FORMAT_TIME, cstart);
3691 if (GST_CLOCK_TIME_IS_VALID (deadline) && deadline < priv->earliest_time) {
3692 GST_WARNING_OBJECT (decoder,
3693 "Dropping frame due to QoS. start:%" GST_TIME_FORMAT " deadline:%"
3694 GST_TIME_FORMAT " earliest_time:%" GST_TIME_FORMAT,
3695 GST_TIME_ARGS (start), GST_TIME_ARGS (deadline),
3696 GST_TIME_ARGS (priv->earliest_time));
3697 gst_aml_video_decoder_post_qos_drop (decoder, cstart);
3698 gst_buffer_unref (buf);
3699 priv->discont = TRUE;
3700 goto done;
3701 }
3702 }
3703
3704 /* Set DISCONT flag here ! */
3705
3706 if (priv->discont) {
3707 GST_DEBUG_OBJECT (decoder, "Setting discont on output buffer");
3708 GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
3709 priv->discont = FALSE;
3710 }
3711
3712 /* update rate estimate */
3713 GST_OBJECT_LOCK (decoder);
3714 priv->bytes_out += gst_buffer_get_size (buf);
3715 if (GST_CLOCK_TIME_IS_VALID (duration)) {
3716 priv->time += duration;
3717 } else {
3718 /* FIXME : Use difference between current and previous outgoing
3719 * timestamp, and relate to difference between current and previous
3720 * bytes */
3721 /* better none than nothing valid */
3722 priv->time = GST_CLOCK_TIME_NONE;
3723 }
3724 GST_OBJECT_UNLOCK (decoder);
3725
3726 GST_DEBUG_OBJECT (decoder, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
3727 "PTS %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
3728 gst_buffer_get_size (buf),
3729 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
3730 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
3731
3732 /* we got data, so note things are looking up again, reduce
3733 * the error count, if there is one */
3734 if (G_UNLIKELY (priv->error_count))
3735 priv->error_count = 0;
3736
3737#ifndef GST_DISABLE_DEBUG
3738 if (G_UNLIKELY (priv->last_reset_time != GST_CLOCK_TIME_NONE)) {
3739 GstClockTime elapsed = gst_util_get_timestamp () - priv->last_reset_time;
3740
3741 /* First buffer since reset, report how long we took */
3742 GST_INFO_OBJECT (decoder, "First buffer since flush took %" GST_TIME_FORMAT
3743 " to produce", GST_TIME_ARGS (elapsed));
3744 priv->last_reset_time = GST_CLOCK_TIME_NONE;
3745 }
3746#endif
3747
3748 /* release STREAM_LOCK not to block upstream
3749 * while pushing buffer downstream */
3750 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3751 ret = gst_pad_push (decoder->srcpad, buf);
3752 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3753
3754done:
3755 return ret;
3756}
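
/* Usage note (application side): the clipping of out-of-segment buffers above
 * can be disabled by setting GST_AML_VIDEO_DECODER_DO_CLIP to 0 in the process
 * environment before the decoder runs, e.g.:
 *
 *   g_setenv ("GST_AML_VIDEO_DECODER_DO_CLIP", "0", TRUE);
 */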
3757
3758/**
3759 * gst_aml_video_decoder_add_to_frame:
3760 * @decoder: a #GstAmlVideoDecoder
3761 * @n_bytes: the number of bytes to add
3762 *
3763 * Removes the next @n_bytes of input data and adds them to the currently parsed frame.
3764 */
3765void
3766gst_aml_video_decoder_add_to_frame (GstAmlVideoDecoder * decoder, int n_bytes)
3767{
3768 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3769 GstBuffer *buf;
3770
3771 GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);
3772
3773 if (n_bytes == 0)
3774 return;
3775
3776 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3777 if (gst_adapter_available (priv->output_adapter) == 0) {
3778 priv->frame_offset =
3779 priv->input_offset - gst_adapter_available (priv->input_adapter);
3780 }
3781 buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);
3782
3783 gst_adapter_push (priv->output_adapter, buf);
3784 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3785}
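
/* Usage sketch (hypothetical subclass code): the parse-mode workflow, assuming
 * the @parse vmethod mirrors the upstream GstVideoDecoder signature. After
 * scanning the input adapter for a frame boundary, the subclass moves the
 * frame's bytes over and, once the frame is complete (frame_size and
 * frame_is_complete are assumed results of that scan), hands it off:
 *
 *   gst_aml_video_decoder_add_to_frame (decoder, frame_size);
 *   if (frame_is_complete)
 *     return gst_aml_video_decoder_have_frame (decoder);
 *   return GST_FLOW_OK;
 */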
3786
3787/**
3788 * gst_aml_video_decoder_get_pending_frame_size:
3789 * @decoder: a #GstAmlVideoDecoder
3790 *
3791 * Returns the number of bytes previously added to the current frame
3792 * by calling gst_aml_video_decoder_add_to_frame().
3793 *
3794 * Returns: The number of bytes pending for the current frame
3795 *
3796 * Since: 1.4
3797 */
3798gsize
3799gst_aml_video_decoder_get_pending_frame_size (GstAmlVideoDecoder * decoder)
3800{
3801 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3802 gsize ret;
3803
3804 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3805 ret = gst_adapter_available (priv->output_adapter);
3806 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3807
3808 GST_LOG_OBJECT (decoder, "Current pending frame has %" G_GSIZE_FORMAT " bytes",
3809 ret);
3810
3811 return ret;
3812}
3813
3814static guint64
3815gst_aml_video_decoder_get_frame_duration (GstAmlVideoDecoder * decoder,
3816 GstAmlVideoCodecFrame * frame)
3817{
3818 GstAmlVideoCodecState *state = decoder->priv->output_state;
3819
3820 /* it's possible that we don't have a state yet when we are dropping the
3821 * initial buffers */
3822 if (state == NULL)
3823 return GST_CLOCK_TIME_NONE;
3824
3825 if (state->info.fps_d == 0 || state->info.fps_n == 0) {
3826 return GST_CLOCK_TIME_NONE;
3827 }
3828
3829 /* FIXME: For interlaced frames this needs to take into account
3830 * the number of valid fields in the frame
3831 */
3832
3833 return gst_util_uint64_scale (GST_SECOND, state->info.fps_d,
3834 state->info.fps_n);
3835}
3836
3837/**
3838 * gst_aml_video_decoder_have_frame:
3839 * @decoder: a #GstAmlVideoDecoder
3840 *
3841 * Gathers all data collected for the currently parsed frame, attaches the
3842 * corresponding metadata and passes it along for further processing, i.e. @handle_frame.
3843 *
3844 * Returns: a #GstFlowReturn
3845 */
3846GstFlowReturn
3847gst_aml_video_decoder_have_frame (GstAmlVideoDecoder * decoder)
3848{
3849 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3850 GstBuffer *buffer;
3851 int n_available;
3852 GstClockTime pts, dts, duration;
3853 guint flags;
3854 GstFlowReturn ret = GST_FLOW_OK;
3855
3856 GST_LOG_OBJECT (decoder, "have_frame at offset %" G_GUINT64_FORMAT,
3857 priv->frame_offset);
3858
3859 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
3860
3861 n_available = gst_adapter_available (priv->output_adapter);
3862 if (n_available) {
3863 buffer = gst_adapter_take_buffer (priv->output_adapter, n_available);
3864 } else {
3865 buffer = gst_buffer_new_and_alloc (0);
3866 }
3867
3868 if (priv->current_frame->input_buffer) {
3869 gst_aml_video_decoder_copy_metas (decoder, priv->current_frame,
3870 priv->current_frame->input_buffer, buffer);
3871 gst_buffer_unref (priv->current_frame->input_buffer);
3872 }
3873 priv->current_frame->input_buffer = buffer;
3874
3875 gst_aml_video_decoder_get_buffer_info_at_offset (decoder,
3876 priv->frame_offset, &pts, &dts, &duration, &flags);
3877
3878 GST_BUFFER_PTS (buffer) = pts;
3879 GST_BUFFER_DTS (buffer) = dts;
3880 GST_BUFFER_DURATION (buffer) = duration;
3881 GST_BUFFER_FLAGS (buffer) = flags;
3882
3883 GST_LOG_OBJECT (decoder, "collected frame size %d, "
3884 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
3885 GST_TIME_FORMAT, n_available, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
3886 GST_TIME_ARGS (duration));
3887
3888 if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
3889 GST_DEBUG_OBJECT (decoder, "Marking as sync point");
3890 GST_AML_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
3891 }
3892
3893 if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_CORRUPTED)) {
3894 GST_DEBUG_OBJECT (decoder, "Marking as corrupted");
3895 GST_AML_VIDEO_CODEC_FRAME_FLAG_SET (priv->current_frame,
3896 GST_AML_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
3897 }
3898
3899 /* In reverse playback, just capture and queue frames for later processing */
3900 if (decoder->input_segment.rate < 0.0) {
3901 priv->parse_gather =
3902 g_list_prepend (priv->parse_gather, priv->current_frame);
3903 priv->current_frame = NULL;
3904 } else {
3905 GstAmlVideoCodecFrame *frame = priv->current_frame;
3906 frame->abidata.ABI.num_subframes++;
3907 /* In subframe mode, we keep a ref for ourselves
3908 * as this frame will be kept during the data collection
3909 * in parsed mode. The frame reference will be released by
3910 * finish_(sub)frame or drop_(sub)frame.*/
3911 if (gst_aml_video_decoder_get_subframe_mode (decoder))
3912 gst_aml_video_codec_frame_ref (priv->current_frame);
3913 else
3914 priv->current_frame = NULL;
3915
3916 /* Decode the frame, which gives away our ref */
3917 ret = gst_aml_video_decoder_decode_frame (decoder, frame);
3918 }
3919
3920 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3921
3922 return ret;
3923}
3924
3925/* Pass the frame in priv->current_frame through the
3926 * handle_frame() callback for decoding and passing to gvd_finish_frame(),
3927 * or dropping by passing to gvd_drop_frame() */
3928static GstFlowReturn
3929gst_aml_video_decoder_decode_frame (GstAmlVideoDecoder * decoder,
3930 GstAmlVideoCodecFrame * frame)
3931{
3932 GstAmlVideoDecoderPrivate *priv = decoder->priv;
3933 GstAmlVideoDecoderClass *decoder_class;
3934 GstFlowReturn ret = GST_FLOW_OK;
3935
3936 decoder_class = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
3937
3938 /* FIXME : This should only have to be checked once (either the subclass has an
3939 * implementation, or it doesn't) */
3940 g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);
3941 g_return_val_if_fail (frame != NULL, GST_FLOW_ERROR);
3942
3943 frame->pts = GST_BUFFER_PTS (frame->input_buffer);
3944 frame->dts = GST_BUFFER_DTS (frame->input_buffer);
3945 frame->duration = GST_BUFFER_DURATION (frame->input_buffer);
3946 frame->deadline =
3947 gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
3948 frame->pts);
3949
3950 /* For keyframes, PTS = DTS + constant_offset, usually 0 to 3 frame
3951 * durations. */
3952 /* FIXME upstream can be quite wrong about the keyframe aspect,
3953 * so we could be going off here as well,
3954 * maybe let subclass decide if it really is/was a keyframe */
3955 if (GST_AML_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
3956 priv->distance_from_sync = 0;
3957
3958 GST_OBJECT_LOCK (decoder);
3959 priv->request_sync_point_flags &=
3960 ~GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT;
3961 if (priv->request_sync_point_frame_number == REQUEST_SYNC_POINT_PENDING)
3962 priv->request_sync_point_frame_number = frame->system_frame_number;
3963 GST_OBJECT_UNLOCK (decoder);
3964
3965 if (GST_CLOCK_TIME_IS_VALID (frame->pts)
3966 && GST_CLOCK_TIME_IS_VALID (frame->dts)) {
3967 /* just in case they are not equal as might ideally be,
3968 * e.g. quicktime has a (positive) delta approach */
3969 priv->pts_delta = frame->pts - frame->dts;
3970 GST_DEBUG_OBJECT (decoder, "PTS delta %d ms",
3971 (gint) (priv->pts_delta / GST_MSECOND));
3972 }
3973 } else {
3974 if (priv->distance_from_sync == -1 && priv->automatic_request_sync_points) {
3975 GST_DEBUG_OBJECT (decoder,
3976 "Didn't receive a keyframe yet, requesting sync point");
3977 gst_aml_video_decoder_request_sync_point (decoder, frame,
3978 priv->automatic_request_sync_point_flags);
3979 }
3980
3981 GST_OBJECT_LOCK (decoder);
3982 if ((priv->needs_sync_point && priv->distance_from_sync == -1)
3983 || (priv->request_sync_point_flags &
3984 GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT)) {
3985 GST_WARNING_OBJECT (decoder,
3986 "Subclass requires a sync point but we didn't receive one yet, discarding input");
3987 GST_OBJECT_UNLOCK (decoder);
3988 if (priv->automatic_request_sync_points) {
3989 gst_aml_video_decoder_request_sync_point (decoder, frame,
3990 priv->automatic_request_sync_point_flags);
3991 }
3992 gst_aml_video_decoder_release_frame (decoder, frame);
3993 return GST_FLOW_OK;
3994 }
3995 GST_OBJECT_UNLOCK (decoder);
3996
3997 priv->distance_from_sync++;
3998 }
3999
4000 frame->distance_from_sync = priv->distance_from_sync;
4001
4002 if (frame->abidata.ABI.num_subframes == 1) {
4003 frame->abidata.ABI.ts = frame->dts;
4004 frame->abidata.ABI.ts2 = frame->pts;
4005 }
4006
4007 GST_LOG_OBJECT (decoder,
4008 "frame %p PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dist %d",
4009 frame, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
4010 frame->distance_from_sync);
4011 /* FIXME: suboptimal way to add a unique frame to the list, in case of subframe mode. */
4012 if (!g_queue_find (&priv->frames, frame)) {
4013 g_queue_push_tail (&priv->frames, gst_aml_video_codec_frame_ref (frame));
4014 } else {
4015 GST_LOG_OBJECT (decoder,
4016 "Do not add an existing frame used to decode subframes");
4017 }
4018
4019 if (priv->frames.length > 10) {
4020 GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames, "
4021 "possible internal leaking?", priv->frames.length);
4022 }
4023
4024 /* do something with frame */
4025 ret = decoder_class->handle_frame (decoder, frame);
4026 if (ret != GST_FLOW_OK)
4027 GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));
4028
4029 /* the frame has either been added to parse_gather or sent to
4030 handle frame so there is no need to unref it */
4031 return ret;
4032}
4033
4034
4035/**
4036 * gst_aml_video_decoder_get_output_state:
4037 * @decoder: a #GstAmlVideoDecoder
4038 *
4039 * Get the #GstAmlVideoCodecState currently describing the output stream.
4040 *
4041 * Returns: (transfer full): #GstAmlVideoCodecState describing format of video data.
4042 */
4043GstAmlVideoCodecState *
4044gst_aml_video_decoder_get_output_state (GstAmlVideoDecoder * decoder)
4045{
4046 GstAmlVideoCodecState *state = NULL;
4047
4048 GST_OBJECT_LOCK (decoder);
4049 if (decoder->priv->output_state)
4050 state = gst_aml_video_codec_state_ref (decoder->priv->output_state);
4051 GST_OBJECT_UNLOCK (decoder);
4052
4053 return state;
4054}
4055
4056static GstAmlVideoCodecState *
4057_set_interlaced_output_state (GstAmlVideoDecoder * decoder,
4058 GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
4059 guint height, GstAmlVideoCodecState * reference, gboolean copy_interlace_mode)
4060{
4061 GstAmlVideoDecoderPrivate *priv = decoder->priv;
4062 GstAmlVideoCodecState *state;
4063
4064 g_assert ((copy_interlace_mode
4065 && interlace_mode == GST_VIDEO_INTERLACE_MODE_PROGRESSIVE)
4066 || !copy_interlace_mode);
4067
4068 GST_DEBUG_OBJECT (decoder,
4069 "fmt:%d, width:%d, height:%d, interlace-mode: %s, reference:%p", fmt,
4070 width, height, gst_video_interlace_mode_to_string (interlace_mode),
4071 reference);
4072
4073 /* Create the new output state */
4074 state =
4075 _new_output_state (fmt, interlace_mode, width, height, reference,
4076 copy_interlace_mode);
4077 if (!state)
4078 return NULL;
4079
4080 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4081
4082 GST_OBJECT_LOCK (decoder);
4083 /* Replace existing output state by new one */
4084 if (priv->output_state)
4085 gst_aml_video_codec_state_unref (priv->output_state);
4086 priv->output_state = gst_aml_video_codec_state_ref (state);
4087
4088 if (priv->output_state != NULL && priv->output_state->info.fps_n > 0) {
4089 priv->qos_frame_duration =
4090 gst_util_uint64_scale (GST_SECOND, priv->output_state->info.fps_d,
4091 priv->output_state->info.fps_n);
4092 } else {
4093 priv->qos_frame_duration = 0;
4094 }
4095 priv->output_state_changed = TRUE;
4096 GST_OBJECT_UNLOCK (decoder);
4097
4098 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4099
4100 return state;
4101}
4102
4103/**
4104 * gst_aml_video_decoder_set_output_state:
4105 * @decoder: a #GstAmlVideoDecoder
4106 * @fmt: a #GstVideoFormat
4107 * @width: The width in pixels
4108 * @height: The height in pixels
4109 * @reference: (allow-none) (transfer none): An optional reference #GstAmlVideoCodecState
4110 *
4111 * Creates a new #GstAmlVideoCodecState with the specified @fmt, @width and @height
4112 * as the output state for the decoder.
4113 * Any previously set output state on @decoder will be replaced by the newly
4114 * created one.
4115 *
4116 * If the subclass wishes to copy over existing fields (like pixel aspect ratio,
4117 * or framerate) from an existing #GstAmlVideoCodecState, it can be provided as a
4118 * @reference.
4119 *
4120 * If the subclass wishes to override some fields from the output state (like
4121 * pixel-aspect-ratio or framerate) it can do so on the returned #GstAmlVideoCodecState.
4122 *
4123 * The new output state will only take effect (set on pads and buffers) starting
4124 * from the next call to #gst_aml_video_decoder_finish_frame().
4125 *
4126 * Returns: (transfer full): the newly configured output state.
4127 */
4128GstAmlVideoCodecState *
4129gst_aml_video_decoder_set_output_state (GstAmlVideoDecoder * decoder,
4130 GstVideoFormat fmt, guint width, guint height,
4131 GstAmlVideoCodecState * reference)
4132{
4133 return _set_interlaced_output_state (decoder, fmt,
4134 GST_VIDEO_INTERLACE_MODE_PROGRESSIVE, width, height, reference, TRUE);
4135}
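
/* Usage sketch (hypothetical subclass code): configuring the output state from
 * the set_format() vmethod once the stream geometry is known. The NV12 format
 * and 1920x1080 size are illustrative assumptions; input_state stands for the
 * #GstAmlVideoCodecState the subclass received in set_format().
 *
 *   GstAmlVideoCodecState *out_state =
 *       gst_aml_video_decoder_set_output_state (decoder,
 *       GST_VIDEO_FORMAT_NV12, 1920, 1080, input_state);
 *   gst_aml_video_codec_state_unref (out_state);
 *   if (!gst_aml_video_decoder_negotiate (decoder))
 *     return FALSE;
 */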
4136
4137/**
4138 * gst_aml_video_decoder_set_interlaced_output_state:
4139 * @decoder: a #GstAmlVideoDecoder
4140 * @fmt: a #GstVideoFormat
4141 * @width: The width in pixels
4142 * @height: The height in pixels
4143 * @interlace_mode: A #GstVideoInterlaceMode
4144 * @reference: (allow-none) (transfer none): An optional reference #GstAmlVideoCodecState
4145 *
4146 * Same as #gst_aml_video_decoder_set_output_state() but also allows you to also set
4147 * the interlacing mode.
4148 *
4149 * Returns: (transfer full): the newly configured output state.
4150 *
4151 * Since: 1.16
4152 */
4153GstAmlVideoCodecState *
4154gst_aml_video_decoder_set_interlaced_output_state (GstAmlVideoDecoder * decoder,
4155 GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
4156 guint height, GstAmlVideoCodecState * reference)
4157{
4158 return _set_interlaced_output_state (decoder, fmt, interlace_mode, width,
4159 height, reference, FALSE);
4160}
4161
4162
4163/**
4164 * gst_aml_video_decoder_get_oldest_frame:
4165 * @decoder: a #GstAmlVideoDecoder
4166 *
4167 * Get the oldest pending unfinished #GstAmlVideoCodecFrame
4168 *
4169 * Returns: (transfer full): oldest pending unfinished #GstAmlVideoCodecFrame.
4170 */
4171GstAmlVideoCodecFrame *
4172gst_aml_video_decoder_get_oldest_frame (GstAmlVideoDecoder * decoder)
4173{
4174 GstAmlVideoCodecFrame *frame = NULL;
4175
4176 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4177 if (decoder->priv->frames.head)
4178 frame = gst_aml_video_codec_frame_ref (decoder->priv->frames.head->data);
4179 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4180
4181 return (GstAmlVideoCodecFrame *) frame;
4182}
4183
4184/**
4185 * gst_aml_video_decoder_get_frame:
4186 * @decoder: a #GstAmlVideoDecoder
4187 * @frame_number: system_frame_number of a frame
4188 *
4189 * Get a pending unfinished #GstAmlVideoCodecFrame
4190 *
4191 * Returns: (transfer full): pending unfinished #GstAmlVideoCodecFrame identified by @frame_number.
4192 */
4193GstAmlVideoCodecFrame *
4194gst_aml_video_decoder_get_frame (GstAmlVideoDecoder * decoder, int frame_number)
4195{
4196 GList *g;
4197 GstAmlVideoCodecFrame *frame = NULL;
4198
4199 GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);
4200
4201 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4202 for (g = decoder->priv->frames.head; g; g = g->next) {
4203 GstAmlVideoCodecFrame *tmp = g->data;
4204
4205 if (tmp->system_frame_number == frame_number) {
4206 frame = gst_aml_video_codec_frame_ref (tmp);
4207 break;
4208 }
4209 }
4210 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4211
4212 return frame;
4213}
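
/* Usage sketch (hypothetical subclass code): an asynchronous hardware decoder
 * looking a frame back up by the system_frame_number it handed to the hardware
 * once the decode completes. my_dec_on_picture_ready() is an assumed callback;
 * a NULL lookup result means the frame was already flushed or dropped.
 *
 *   static void
 *   my_dec_on_picture_ready (GstAmlVideoDecoder * dec, guint32 id, GstBuffer * pic)
 *   {
 *     GstAmlVideoCodecFrame *frame = gst_aml_video_decoder_get_frame (dec, id);
 *
 *     if (frame == NULL)
 *       return;
 *     frame->output_buffer = pic;
 *     gst_aml_video_decoder_finish_frame (dec, frame);
 *   }
 */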
4214
4215/**
4216 * gst_aml_video_decoder_get_frames:
4217 * @decoder: a #GstAmlVideoDecoder
4218 *
4219 * Get all pending unfinished #GstAmlVideoCodecFrame
4220 *
4221 * Returns: (transfer full) (element-type GstAmlVideoCodecFrame): pending unfinished #GstAmlVideoCodecFrame.
4222 */
4223GList *
4224gst_aml_video_decoder_get_frames (GstAmlVideoDecoder * decoder)
4225{
4226 GList *frames;
4227
4228 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4229 frames =
4230 g_list_copy_deep (decoder->priv->frames.head,
4231 (GCopyFunc) gst_aml_video_codec_frame_ref, NULL);
4232 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4233
4234 return frames;
4235}
4236
4237static gboolean
4238gst_aml_video_decoder_decide_allocation_default (GstAmlVideoDecoder * decoder,
4239 GstQuery * query)
4240{
4241 GstCaps *outcaps = NULL;
4242 GstBufferPool *pool = NULL;
4243 guint size, min, max;
4244 GstAllocator *allocator = NULL;
4245 GstAllocationParams params;
4246 GstStructure *config;
4247 gboolean update_pool, update_allocator;
4248 GstVideoInfo vinfo;
4249
4250 gst_query_parse_allocation (query, &outcaps, NULL);
4251 gst_video_info_init (&vinfo);
4252 if (outcaps)
4253 gst_video_info_from_caps (&vinfo, outcaps);
4254
4255 /* we got configuration from our peer or the decide_allocation method,
4256 * parse them */
4257 if (gst_query_get_n_allocation_params (query) > 0) {
4258 /* try the allocator */
4259 gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
4260 update_allocator = TRUE;
4261 } else {
4262 allocator = NULL;
4263 gst_allocation_params_init (&params);
4264 update_allocator = FALSE;
4265 }
4266
4267 if (gst_query_get_n_allocation_pools (query) > 0) {
4268 gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
4269 size = MAX (size, vinfo.size);
4270 update_pool = TRUE;
4271 } else {
4272 pool = NULL;
4273 size = vinfo.size;
4274 min = max = 0;
4275
4276 update_pool = FALSE;
4277 }
4278
4279 if (pool == NULL) {
4280 /* no pool, we can make our own */
4281 GST_DEBUG_OBJECT (decoder, "no pool, making new pool");
4282 pool = gst_video_buffer_pool_new ();
4283 }
4284
4285 /* now configure */
4286 config = gst_buffer_pool_get_config (pool);
4287 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
4288 gst_buffer_pool_config_set_allocator (config, allocator, &params);
4289
4290 GST_DEBUG_OBJECT (decoder,
4291 "setting config %" GST_PTR_FORMAT " in pool %" GST_PTR_FORMAT, config,
4292 pool);
4293 if (!gst_buffer_pool_set_config (pool, config)) {
4294 config = gst_buffer_pool_get_config (pool);
4295
4296 /* If change are not acceptable, fallback to generic pool */
4297 if (!gst_buffer_pool_config_validate_params (config, outcaps, size, min,
4298 max)) {
4299 GST_DEBUG_OBJECT (decoder, "unsupported pool, making new pool");
4300
4301 gst_object_unref (pool);
4302 pool = gst_video_buffer_pool_new ();
4303 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
4304 gst_buffer_pool_config_set_allocator (config, allocator, &params);
4305 }
4306
4307 if (!gst_buffer_pool_set_config (pool, config))
4308 goto config_failed;
4309 }
4310
4311 if (update_allocator)
4312 gst_query_set_nth_allocation_param (query, 0, allocator, &params);
4313 else
4314 gst_query_add_allocation_param (query, allocator, &params);
4315 if (allocator)
4316 gst_object_unref (allocator);
4317
4318 if (update_pool)
4319 gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
4320 else
4321 gst_query_add_allocation_pool (query, pool, size, min, max);
4322
4323 if (pool)
4324 gst_object_unref (pool);
4325
4326 return TRUE;
4327
4328config_failed:
4329 if (allocator)
4330 gst_object_unref (allocator);
4331 if (pool)
4332 gst_object_unref (pool);
4333 GST_ELEMENT_ERROR (decoder, RESOURCE, SETTINGS,
4334 ("Failed to configure the buffer pool"),
4335 ("Configuration is most likely invalid, please report this issue."));
4336 return FALSE;
4337}
4338
4339static gboolean
4340gst_aml_video_decoder_propose_allocation_default (GstAmlVideoDecoder * decoder,
4341 GstQuery * query)
4342{
4343 return TRUE;
4344}
4345
4346static gboolean
4347gst_aml_video_decoder_negotiate_pool (GstAmlVideoDecoder * decoder, GstCaps * caps)
4348{
4349 GstAmlVideoDecoderClass *klass;
4350 GstQuery *query = NULL;
4351 GstBufferPool *pool = NULL;
4352 GstAllocator *allocator;
4353 GstAllocationParams params;
4354 gboolean ret = TRUE;
4355
4356 klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
4357
4358 query = gst_query_new_allocation (caps, TRUE);
4359
4360 GST_DEBUG_OBJECT (decoder, "do query ALLOCATION");
4361
4362 if (!gst_pad_peer_query (decoder->srcpad, query)) {
4363 GST_DEBUG_OBJECT (decoder, "didn't get downstream ALLOCATION hints");
4364 }
4365
4366 g_assert (klass->decide_allocation != NULL);
4367 ret = klass->decide_allocation (decoder, query);
4368
4369 GST_DEBUG_OBJECT (decoder, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, ret,
4370 query);
4371
4372 if (!ret)
4373 goto no_decide_allocation;
4374
4375 /* we got configuration from our peer or the decide_allocation method,
4376 * parse them */
4377 if (gst_query_get_n_allocation_params (query) > 0) {
4378 gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
4379 } else {
4380 allocator = NULL;
4381 gst_allocation_params_init (&params);
4382 }
4383
4384 if (gst_query_get_n_allocation_pools (query) > 0)
4385 gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
4386 if (!pool) {
4387 if (allocator)
4388 gst_object_unref (allocator);
4389 ret = FALSE;
4390 goto no_decide_allocation;
4391 }
4392
4393 if (decoder->priv->allocator)
4394 gst_object_unref (decoder->priv->allocator);
4395 decoder->priv->allocator = allocator;
4396 decoder->priv->params = params;
4397
4398 if (decoder->priv->pool) {
4399 /* do not set the bufferpool to inactive here, it will be done
4400 * on its finalize function. As the videodecoder does late renegotiation
4401 * it might happen that some element downstream is already using this
4402 * same bufferpool and deactivating it will make it fail.
4403 * Happens when a downstream element changes from passthrough to
4404 * non-passthrough and gets this same bufferpool to use */
4405 GST_DEBUG_OBJECT (decoder, "unref pool %" GST_PTR_FORMAT,
4406 decoder->priv->pool);
4407 gst_object_unref (decoder->priv->pool);
4408 }
4409 decoder->priv->pool = pool;
4410
4411 /* and activate */
4412 GST_DEBUG_OBJECT (decoder, "activate pool %" GST_PTR_FORMAT, pool);
4413 gst_buffer_pool_set_active (pool, TRUE);
4414
4415done:
4416 if (query)
4417 gst_query_unref (query);
4418
4419 return ret;
4420
4421 /* Errors */
4422no_decide_allocation:
4423 {
4424 GST_WARNING_OBJECT (decoder, "Subclass failed to decide allocation");
4425 goto done;
4426 }
4427}
4428
4429static gboolean
4430gst_aml_video_decoder_negotiate_default (GstAmlVideoDecoder * decoder)
4431{
4432 GstAmlVideoCodecState *state = decoder->priv->output_state;
4433 gboolean ret = TRUE;
4434 GstAmlVideoCodecFrame *frame;
4435 GstCaps *prevcaps;
4436 GstCaps *incaps;
4437
4438 if (!state) {
4439 GST_DEBUG_OBJECT (decoder,
4440 "Trying to negotiate the pool without setting the output format");
4441 ret = gst_aml_video_decoder_negotiate_pool (decoder, NULL);
4442 goto done;
4443 }
4444
4445 g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
4446 g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);
4447
4448 /* If the base class didn't set any multiview params, assume mono
4449 * now */
4450 if (GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) ==
4451 GST_VIDEO_MULTIVIEW_MODE_NONE) {
4452 GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) =
4453 GST_VIDEO_MULTIVIEW_MODE_MONO;
4454 GST_VIDEO_INFO_MULTIVIEW_FLAGS (&state->info) =
4455 GST_VIDEO_MULTIVIEW_FLAGS_NONE;
4456 }
4457
4458 GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
4459 state->info.par_n, state->info.par_d,
4460 state->info.fps_n, state->info.fps_d);
4461
4462 if (state->caps == NULL)
4463 state->caps = gst_video_info_to_caps (&state->info);
4464
4465 incaps = gst_pad_get_current_caps (GST_AML_VIDEO_DECODER_SINK_PAD (decoder));
4466 if (incaps) {
4467 GstStructure *in_struct;
4468
4469 in_struct = gst_caps_get_structure (incaps, 0);
4470 if (gst_structure_has_field (in_struct, "mastering-display-info") ||
4471 gst_structure_has_field (in_struct, "content-light-level")) {
4472 const gchar *s;
4473
4474 /* prefer upstream information */
4475 state->caps = gst_caps_make_writable (state->caps);
4476 if ((s = gst_structure_get_string (in_struct, "mastering-display-info"))) {
4477 gst_caps_set_simple (state->caps,
4478 "mastering-display-info", G_TYPE_STRING, s, NULL);
4479 }
4480
4481 if ((s = gst_structure_get_string (in_struct, "content-light-level"))) {
4482 gst_caps_set_simple (state->caps,
4483 "content-light-level", G_TYPE_STRING, s, NULL);
4484 }
4485 }
4486
4487 gst_caps_unref (incaps);
4488 }
4489
4490 if (state->allocation_caps == NULL)
4491 state->allocation_caps = gst_caps_ref (state->caps);
4492
4493 GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);
4494
4495 /* Push all pending pre-caps events of the oldest frame before
4496 * setting caps */
4497 frame = decoder->priv->frames.head ? decoder->priv->frames.head->data : NULL;
4498 if (frame || decoder->priv->current_frame_events) {
4499 GList **events, *l;
4500
4501 if (frame) {
4502 events = &frame->events;
4503 } else {
4504 events = &decoder->priv->current_frame_events;
4505 }
4506
4507 for (l = g_list_last (*events); l;) {
4508 GstEvent *event = GST_EVENT (l->data);
4509 GList *tmp;
4510
4511 if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
4512 gst_aml_video_decoder_push_event (decoder, event);
4513 tmp = l;
4514 l = l->prev;
4515 *events = g_list_delete_link (*events, tmp);
4516 } else {
4517 l = l->prev;
4518 }
4519 }
4520 }
4521
4522 prevcaps = gst_pad_get_current_caps (decoder->srcpad);
4523 if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps)) {
4524 if (!prevcaps) {
4525 GST_DEBUG_OBJECT (decoder, "decoder src pad has currently NULL caps");
4526 }
4527 ret = gst_pad_set_caps (decoder->srcpad, state->caps);
4528 } else {
4529 ret = TRUE;
4530 GST_DEBUG_OBJECT (decoder,
4531 "current src pad and output state caps are the same");
4532 }
4533 if (prevcaps)
4534 gst_caps_unref (prevcaps);
4535
4536 if (!ret)
4537 goto done;
4538 decoder->priv->output_state_changed = FALSE;
4539 /* Negotiate pool */
4540 ret = gst_aml_video_decoder_negotiate_pool (decoder, state->allocation_caps);
4541
4542done:
4543 return ret;
4544}
4545
4546static gboolean
4547gst_aml_video_decoder_negotiate_unlocked (GstAmlVideoDecoder * decoder)
4548{
4549 GstAmlVideoDecoderClass *klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
4550 gboolean ret = TRUE;
4551
4552 if (G_LIKELY (klass->negotiate))
4553 ret = klass->negotiate (decoder);
4554
4555 return ret;
4556}
4557
4558/**
4559 * gst_aml_video_decoder_negotiate:
4560 * @decoder: a #GstAmlVideoDecoder
4561 *
4562 * Negotiate with downstream elements to currently configured #GstAmlVideoCodecState.
4563 * Unmark GST_PAD_FLAG_NEED_RECONFIGURE in any case. But mark it again if
4564 * negotiation fails.
4565 *
4566 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
4567 */
4568gboolean
4569gst_aml_video_decoder_negotiate (GstAmlVideoDecoder * decoder)
4570{
4571 GstAmlVideoDecoderClass *klass;
4572 gboolean ret = TRUE;
4573
4574 g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (decoder), FALSE);
4575
4576 klass = GST_AML_VIDEO_DECODER_GET_CLASS (decoder);
4577
4578 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4579 gst_pad_check_reconfigure (decoder->srcpad);
4580 if (klass->negotiate) {
4581 ret = klass->negotiate (decoder);
4582 if (!ret)
4583 gst_pad_mark_reconfigure (decoder->srcpad);
4584 }
4585 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4586
4587 return ret;
4588}
4589
4590/**
4591 * gst_aml_video_decoder_allocate_output_buffer:
4592 * @decoder: a #GstAmlVideoDecoder
4593 *
4594 * Helper function that allocates a buffer to hold a video frame for @decoder's
4595 * current #GstAmlVideoCodecState.
4596 *
4597 * You should use gst_aml_video_decoder_allocate_output_frame() instead of this
4598 * function, if possible at all.
4599 *
4600 * Returns: (transfer full): allocated buffer, or NULL if no buffer could be
4601 * allocated (e.g. when downstream is flushing or shutting down)
4602 */
4603GstBuffer *
4604gst_aml_video_decoder_allocate_output_buffer (GstAmlVideoDecoder * decoder)
4605{
4606 GstFlowReturn flow;
4607 GstBuffer *buffer = NULL;
4608 gboolean needs_reconfigure = FALSE;
4609
4610 GST_DEBUG ("alloc src buffer");
4611
4612 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4613 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
4614 if (G_UNLIKELY (!decoder->priv->output_state
4615 || decoder->priv->output_state_changed || needs_reconfigure)) {
4616 if (!gst_aml_video_decoder_negotiate_unlocked (decoder)) {
4617 if (decoder->priv->output_state) {
4618 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
4619 gst_pad_mark_reconfigure (decoder->srcpad);
4620 goto fallback;
4621 } else {
4622 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, output_buffer=NULL");
4623 goto failed_allocation;
4624 }
4625 }
4626 }
4627
4628 flow = gst_buffer_pool_acquire_buffer (decoder->priv->pool, &buffer, NULL);
4629
4630 if (flow != GST_FLOW_OK) {
4631 GST_INFO_OBJECT (decoder, "couldn't allocate output buffer, flow %s",
4632 gst_flow_get_name (flow));
4633 if (decoder->priv->output_state && decoder->priv->output_state->info.size)
4634 goto fallback;
4635 else
4636 goto failed_allocation;
4637 }
4638 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4639
4640 return buffer;
4641
4642fallback:
4643 GST_INFO_OBJECT (decoder,
4644 "Fallback allocation, creating a new buffer which doesn't belong to any buffer pool");
4645 buffer =
4646 gst_buffer_new_allocate (NULL, decoder->priv->output_state->info.size,
4647 NULL);
4648
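  /* Note: control deliberately falls through into failed_allocation below so
   * that the stream lock is released on every path; when the fallback buffer
   * was allocated, the error log there is spurious but harmless. */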
4649failed_allocation:
4650 GST_ERROR_OBJECT (decoder, "Failed to allocate the buffer..");
4651 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4652
4653 return buffer;
4654}
4655
4656/**
4657 * gst_aml_video_decoder_allocate_output_frame:
4658 * @decoder: a #GstAmlVideoDecoder
4659 * @frame: a #GstAmlVideoCodecFrame
4660 *
4661 * Helper function that allocates a buffer to hold a video frame for @decoder's
4662 * current #GstAmlVideoCodecState. Subclass should already have configured video
4663 * state and set src pad caps.
4664 *
4665 * The buffer allocated here is owned by the frame and you should only
4666 * keep references to the frame, not the buffer.
4667 *
4668 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4669 */
4670GstFlowReturn
4671gst_aml_video_decoder_allocate_output_frame (GstAmlVideoDecoder *
4672 decoder, GstAmlVideoCodecFrame * frame)
4673{
4674 return gst_aml_video_decoder_allocate_output_frame_with_params (decoder, frame,
4675 NULL);
4676}
4677
4678/**
4679 * gst_aml_video_decoder_allocate_output_frame_with_params:
4680 * @decoder: a #GstAmlVideoDecoder
4681 * @frame: a #GstAmlVideoCodecFrame
4682 * @params: a #GstBufferPoolAcquireParams
4683 *
4684 * Same as #gst_aml_video_decoder_allocate_output_frame except it allows passing
4685 * #GstBufferPoolAcquireParams to the sub call gst_buffer_pool_acquire_buffer.
4686 *
4687 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4688 *
4689 * Since: 1.12
4690 */
4691GstFlowReturn
4692gst_aml_video_decoder_allocate_output_frame_with_params (GstAmlVideoDecoder *
4693 decoder, GstAmlVideoCodecFrame * frame, GstBufferPoolAcquireParams * params)
4694{
4695 GstFlowReturn flow_ret;
4696 GstAmlVideoCodecState *state;
4697 int num_bytes;
4698 gboolean needs_reconfigure = FALSE;
4699
4700 g_return_val_if_fail (decoder->priv->output_state, GST_FLOW_NOT_NEGOTIATED);
4701 g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);
4702
4703 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4704
4705 state = decoder->priv->output_state;
4706 if (state == NULL) {
4707 g_warning ("Output state should be set before allocating frame");
4708 goto error;
4709 }
4710 num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
4711 if (num_bytes == 0) {
4712 g_warning ("Frame size should not be 0");
4713 goto error;
4714 }
4715
4716 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
4717 if (G_UNLIKELY (decoder->priv->output_state_changed || needs_reconfigure)) {
4718 if (!gst_aml_video_decoder_negotiate_unlocked (decoder)) {
4719 gst_pad_mark_reconfigure (decoder->srcpad);
4720 if (GST_PAD_IS_FLUSHING (decoder->srcpad)) {
4721 GST_DEBUG_OBJECT (decoder,
4722 "Failed to negotiate a pool: pad is flushing");
4723 goto flushing;
4724 } else if (!decoder->priv->pool || decoder->priv->output_state_changed) {
4725 GST_DEBUG_OBJECT (decoder,
4726 "Failed to negotiate a pool and no previous pool to reuse");
4727 goto error;
4728 } else {
4729 GST_DEBUG_OBJECT (decoder,
4730 "Failed to negotiate a pool, falling back to the previous pool");
4731 }
4732 }
4733 }
4734
4735 GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);
4736
4737 flow_ret = gst_buffer_pool_acquire_buffer (decoder->priv->pool,
4738 &frame->output_buffer, params);
4739
4740 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4741
4742 return flow_ret;
4743
4744flushing:
4745 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4746 return GST_FLOW_FLUSHING;
4747
4748error:
4749 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4750 return GST_FLOW_ERROR;
4751}
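
/* Usage sketch (hypothetical subclass code): non-blocking buffer acquisition,
 * so a decoding thread can back off instead of blocking on an exhausted pool.
 * gst_buffer_pool_acquire_buffer() returns GST_FLOW_EOS in that case when
 * GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT is set.
 *
 *   GstBufferPoolAcquireParams params = { 0, };
 *
 *   params.flags = GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT;
 *   ret = gst_aml_video_decoder_allocate_output_frame_with_params (decoder,
 *       frame, &params);
 *   if (ret == GST_FLOW_EOS)
 *     GST_DEBUG_OBJECT (decoder, "pool drained, will retry later");
 */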
4752
4753/**
4754 * gst_aml_video_decoder_get_max_decode_time:
4755 * @decoder: a #GstAmlVideoDecoder
4756 * @frame: a #GstAmlVideoCodecFrame
4757 *
4758 * Determines maximum possible decoding time for @frame that will
4759 * allow it to decode and arrive in time (as determined by QoS events).
4760 * In particular, a negative result means decoding in time is no longer possible
4761 * and decoding should therefore either be skipped or happen as soon as possible.
4762 *
4763 * Returns: max decoding time.
4764 */
4765GstClockTimeDiff
4766gst_aml_video_decoder_get_max_decode_time (GstAmlVideoDecoder *
4767 decoder, GstAmlVideoCodecFrame * frame)
4768{
4769 GstClockTimeDiff deadline;
4770 GstClockTime earliest_time;
4771
4772 GST_OBJECT_LOCK (decoder);
4773 earliest_time = decoder->priv->earliest_time;
4774 if (GST_CLOCK_TIME_IS_VALID (earliest_time)
4775 && GST_CLOCK_TIME_IS_VALID (frame->deadline))
4776 deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
4777 else
4778 deadline = G_MAXINT64;
4779
4780 GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
4781 ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_STIME_FORMAT,
4782 GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
4783 GST_STIME_ARGS (deadline));
4784
4785 GST_OBJECT_UNLOCK (decoder);
4786
4787 return deadline;
4788}
4789
4790/**
4791 * gst_aml_video_decoder_get_qos_proportion:
4792 * @decoder: a #GstAmlVideoDecoder
4794 *
4795 * Returns: The current QoS proportion.
4796 *
4797 * Since: 1.0.3
4798 */
4799gdouble
4800gst_aml_video_decoder_get_qos_proportion (GstAmlVideoDecoder * decoder)
4801{
4802 gdouble proportion;
4803
4804 g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (decoder), 1.0);
4805
4806 GST_OBJECT_LOCK (decoder);
4807 proportion = decoder->priv->proportion;
4808 GST_OBJECT_UNLOCK (decoder);
4809
4810 return proportion;
4811}
4812
4813GstFlowReturn
4814_gst_aml_video_decoder_error (GstAmlVideoDecoder * dec, gint weight,
4815 GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
4816 const gchar * function, gint line)
4817{
4818 if (txt)
4819 GST_WARNING_OBJECT (dec, "error: %s", txt);
4820 if (dbg)
4821 GST_WARNING_OBJECT (dec, "error: %s", dbg);
4822 dec->priv->error_count += weight;
4823 dec->priv->discont = TRUE;
4824 if (dec->priv->max_errors >= 0 &&
4825 dec->priv->error_count > dec->priv->max_errors) {
4826 gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
4827 domain, code, txt, dbg, file, function, line);
4828 return GST_FLOW_ERROR;
4829 } else {
4830 g_free (txt);
4831 g_free (dbg);
4832 return GST_FLOW_OK;
4833 }
4834}
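
/* Usage sketch (hypothetical subclass code): subclasses normally reach
 * _gst_aml_video_decoder_error() through a convenience macro. Assuming this
 * port keeps a GST_AML_VIDEO_DECODER_ERROR macro mirroring upstream's
 * GST_VIDEO_DECODER_ERROR, a recoverable bitstream error would be reported as:
 *
 *   GST_AML_VIDEO_DECODER_ERROR (decoder, 1, STREAM, DECODE,
 *       ("decoding error"), ("invalid slice header"), ret);
 */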
4835
4836/**
4837 * gst_aml_video_decoder_set_max_errors:
4838 * @dec: a #GstAmlVideoDecoder
4839 * @num: max tolerated errors
4840 *
4841 * Sets the number of tolerated decoder errors, where a tolerated one is only
4842 * warned about, but exceeding the count will lead to a fatal error. You can set
4843 * -1 for never returning fatal errors. Default is set to
4844 * GST_AML_VIDEO_DECODER_MAX_ERRORS.
4845 *
4846 * The '-1' option was added in 1.4
4847 */
4848void
4849gst_aml_video_decoder_set_max_errors (GstAmlVideoDecoder * dec, gint num)
4850{
4851 g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));
4852
4853 dec->priv->max_errors = num;
4854}
4855
4856/**
4857 * gst_aml_video_decoder_get_max_errors:
4858 * @dec: a #GstAmlVideoDecoder
4859 *
4860 * Returns: currently configured decoder tolerated error count.
4861 */
4862gint
4863gst_aml_video_decoder_get_max_errors (GstAmlVideoDecoder * dec)
4864{
4865 g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (dec), 0);
4866
4867 return dec->priv->max_errors;
4868}
4869
4870/**
4871 * gst_aml_video_decoder_set_needs_format:
4872 * @dec: a #GstAmlVideoDecoder
4873 * @enabled: new state
4874 *
4875 * Configures decoder format needs. If enabled, subclass needs to be
4876 * negotiated with format caps before it can process any data. It will then
4877 * never be handed any data before it has been configured.
4878 * Otherwise, it might be handed data without having been configured and
4879 * is then expected to be able to do so either by default
4880 * or based on the input data.
4881 *
4882 * Since: 1.4
4883 */
4884void
4885gst_aml_video_decoder_set_needs_format (GstAmlVideoDecoder * dec, gboolean enabled)
4886{
4887 g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));
4888
4889 dec->priv->needs_format = enabled;
4890}
4891
4892/**
4893 * gst_aml_video_decoder_get_needs_format:
4894 * @dec: a #GstAmlVideoDecoder
4895 *
4896 * Queries decoder required format handling.
4897 *
4898 * Returns: %TRUE if required format handling is enabled.
4899 *
4900 * Since: 1.4
4901 */
4902gboolean
4903gst_aml_video_decoder_get_needs_format (GstAmlVideoDecoder * dec)
4904{
4905 gboolean result;
4906
4907 g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (dec), FALSE);
4908
4909 result = dec->priv->needs_format;
4910
4911 return result;
4912}
4913
4914/**
4915 * gst_aml_video_decoder_set_packetized:
4916 * @decoder: a #GstAmlVideoDecoder
4917 * @packetized: whether the input data should be considered as packetized.
4918 *
4919 * Allows baseclass to consider input data as packetized or not. If the
4920 * input is packetized, then the @parse method will not be called.
4921 */
4922void
4923gst_aml_video_decoder_set_packetized (GstAmlVideoDecoder * decoder,
4924 gboolean packetized)
4925{
4926 decoder->priv->packetized = packetized;
4927}
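
/* Usage sketch (hypothetical subclass code): a decoder that only accepts whole
 * encoded frames declares itself packetized, typically at init or set_format
 * time, so the base class never invokes @parse:
 *
 *   gst_aml_video_decoder_set_packetized (decoder, TRUE);
 */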
4928
4929/**
4930 * gst_aml_video_decoder_get_packetized:
4931 * @decoder: a #GstAmlVideoDecoder
4932 *
4933 * Queries whether input data is considered packetized or not by the
4934 * base class.
4935 *
4936 * Returns: TRUE if input data is considered packetized.
4937 */
4938gboolean
4939gst_aml_video_decoder_get_packetized (GstAmlVideoDecoder * decoder)
4940{
4941 return decoder->priv->packetized;
4942}
4943
4944/**
4945 * gst_aml_video_decoder_have_last_subframe:
4946 * @decoder: a #GstAmlVideoDecoder
4947 * @frame: (transfer none): the #GstAmlVideoCodecFrame to update
4948 *
4949 * Indicates that the last subframe has been processed by the decoder
4950 * in @frame. This will release the current frame in the video decoder,
4951 * allowing it to receive new frames from upstream elements. This method
4952 * must be called in the subclass @handle_frame callback.
4953 *
4954 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
4955 *
4956 * Since: 1.20
4957 */
4958GstFlowReturn
4959gst_aml_video_decoder_have_last_subframe (GstAmlVideoDecoder * decoder,
4960 GstAmlVideoCodecFrame * frame)
4961{
4962 g_return_val_if_fail (gst_aml_video_decoder_get_subframe_mode (decoder),
4963 GST_FLOW_OK);
4964 /* unref once from the list */
4965 GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
4966 if (decoder->priv->current_frame == frame) {
4967 gst_aml_video_codec_frame_unref (decoder->priv->current_frame);
4968 decoder->priv->current_frame = NULL;
4969 }
4970 GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4971
4972 return GST_FLOW_OK;
4973}
4974
4975/**
4976 * gst_aml_video_decoder_set_subframe_mode:
4977 * @decoder: a #GstAmlVideoDecoder
4978 * @subframe_mode: whether the input data should be considered as subframes.
4979 *
4980 * If this is set to TRUE, it informs the base class that the subclass
4981 * can receive the data at a granularity lower than one frame.
4982 *
4983 * Note that in this mode, the subclass has two options. It can either
4984 * require the presence of a GST_VIDEO_BUFFER_FLAG_MARKER to mark the
4985 * end of a frame. Or it can operate in such a way that it will decode
4986 * a single frame at a time. In this second case, every buffer that
4987 * arrives to the element is considered part of the same frame until
4988 * gst_aml_video_decoder_finish_frame() is called.
4989 *
4990 * In either case, the same #GstAmlVideoCodecFrame will be passed to the
4991 * GstAmlVideoDecoderClass:handle_frame vmethod repeatedly with a
4992 * different GstAmlVideoCodecFrame:input_buffer every time until the end of the
4993 * frame has been signaled using either method.
4994 * This method must be called during the decoder subclass @set_format call.
4995 *
4996 * Since: 1.20
4997 */
4998void
4999gst_aml_video_decoder_set_subframe_mode (GstAmlVideoDecoder * decoder,
5000 gboolean subframe_mode)
5001{
5002 decoder->priv->subframe_mode = subframe_mode;
5003}
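
/* Usage sketch (hypothetical subclass code): enabling subframe mode, which as
 * documented above must happen during the subclass set_format() call (vmethod
 * signature assumed to mirror upstream GstVideoDecoder):
 *
 *   static gboolean
 *   my_dec_set_format (GstAmlVideoDecoder * dec, GstAmlVideoCodecState * state)
 *   {
 *     gst_aml_video_decoder_set_subframe_mode (dec, TRUE);
 *     return TRUE;
 *   }
 */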
5004
5005/**
5006 * gst_aml_video_decoder_get_subframe_mode:
5007 * @decoder: a #GstAmlVideoDecoder
5008 *
5009 * Queries whether input data is considered as subframes or not by the
5010 * base class. If FALSE, each input buffer will be considered as a full
5011 * frame.
5012 *
 * Returns: TRUE if input data is considered as subframes.
 *
 * Since: 1.20
 */
gboolean
gst_aml_video_decoder_get_subframe_mode (GstAmlVideoDecoder * decoder)
{
  return decoder->priv->subframe_mode;
}

/**
 * gst_aml_video_decoder_get_input_subframe_index:
 * @decoder: a #GstAmlVideoDecoder
 * @frame: (transfer none): the #GstAmlVideoCodecFrame to query
 *
 * Queries the index of the last subframe received in @frame by
 * the decoder baseclass.
 *
 * Returns: the current subframe index received in subframe mode, 1 otherwise.
 *
 * Since: 1.20
 */
guint
gst_aml_video_decoder_get_input_subframe_index (GstAmlVideoDecoder * decoder,
    GstAmlVideoCodecFrame * frame)
{
  return frame->abidata.ABI.num_subframes;
}

/**
 * gst_aml_video_decoder_get_processed_subframe_index:
 * @decoder: a #GstAmlVideoDecoder
 * @frame: (transfer none): the #GstAmlVideoCodecFrame to query
 *
 * Queries the number of subframes of @frame already processed by
 * the decoder baseclass.
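 *
 * Together with gst_aml_video_decoder_get_input_subframe_index() this can
 * be used, for instance, to compute how many received subframes still await
 * a decoding result (a sketch, assuming subframe mode is enabled):
 * |[<!-- language="C" -->
 * guint received =
 *     gst_aml_video_decoder_get_input_subframe_index (decoder, frame);
 * guint processed =
 *     gst_aml_video_decoder_get_processed_subframe_index (decoder, frame);
 * guint pending = received - processed;  // subframes not yet finished
 * ]|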
 *
 * Returns: the number of subframes of @frame processed so far in subframe mode.
 *
 * Since: 1.20
 */
guint
gst_aml_video_decoder_get_processed_subframe_index (GstAmlVideoDecoder * decoder,
    GstAmlVideoCodecFrame * frame)
{
  return frame->abidata.ABI.subframes_processed;
}

/**
 * gst_aml_video_decoder_set_estimate_rate:
 * @dec: a #GstAmlVideoDecoder
 * @enabled: whether to enable byte-to-time conversion
 *
 * Allows the baseclass to perform estimated byte-to-time conversion.
 */
void
gst_aml_video_decoder_set_estimate_rate (GstAmlVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));

  dec->priv->do_estimate_rate = enabled;
}

/**
 * gst_aml_video_decoder_get_estimate_rate:
 * @dec: a #GstAmlVideoDecoder
 *
 * Returns: the currently configured byte-to-time conversion setting
 */
gboolean
gst_aml_video_decoder_get_estimate_rate (GstAmlVideoDecoder * dec)
{
  g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (dec), FALSE);

  return dec->priv->do_estimate_rate;
}

/**
 * gst_aml_video_decoder_set_latency:
 * @decoder: a #GstAmlVideoDecoder
 * @min_latency: minimum latency
 * @max_latency: maximum latency
 *
 * Lets #GstAmlVideoDecoder sub-classes tell the baseclass what the decoder latency
 * is. If the provided values changed from previously provided ones, this will
 * also post a LATENCY message on the bus so the pipeline can reconfigure its
 * global latency.
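 *
 * For instance, a subclass that can hold back up to two frames might derive
 * its latency from the negotiated framerate in its @set_format implementation
 * (a sketch; the two-frame depth is an illustrative assumption):
 * |[<!-- language="C" -->
 * if (state->info.fps_n > 0) {
 *   GstClockTime frame_duration = gst_util_uint64_scale (GST_SECOND,
 *       state->info.fps_d, state->info.fps_n);
 *
 *   // Worst case two frames are buffered before output
 *   gst_aml_video_decoder_set_latency (decoder, frame_duration,
 *       2 * frame_duration);
 * }
 * ]|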
 */
void
gst_aml_video_decoder_set_latency (GstAmlVideoDecoder * decoder,
    GstClockTime min_latency, GstClockTime max_latency)
{
  gboolean post_message = FALSE;

  g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
  g_return_if_fail (max_latency >= min_latency);

  GST_DEBUG_OBJECT (decoder,
      "min_latency:%" GST_TIME_FORMAT " max_latency:%" GST_TIME_FORMAT,
      GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));

  GST_OBJECT_LOCK (decoder);
  if (decoder->priv->min_latency != min_latency) {
    decoder->priv->min_latency = min_latency;
    post_message = TRUE;
  }
  if (decoder->priv->max_latency != max_latency) {
    decoder->priv->max_latency = max_latency;
    post_message = TRUE;
  }
  if (!decoder->priv->posted_latency_msg) {
    decoder->priv->posted_latency_msg = TRUE;
    post_message = TRUE;
  }
  GST_OBJECT_UNLOCK (decoder);

  if (post_message)
    gst_element_post_message (GST_ELEMENT_CAST (decoder),
        gst_message_new_latency (GST_OBJECT_CAST (decoder)));
}

/**
 * gst_aml_video_decoder_get_latency:
 * @decoder: a #GstAmlVideoDecoder
 * @min_latency: (out) (allow-none): address of variable in which to store the
 *     configured minimum latency, or %NULL
 * @max_latency: (out) (allow-none): address of variable in which to store the
 *     configured maximum latency, or %NULL
 *
 * Query the configured decoder latency. Results will be returned via
 * @min_latency and @max_latency.
 */
void
gst_aml_video_decoder_get_latency (GstAmlVideoDecoder * decoder,
    GstClockTime * min_latency, GstClockTime * max_latency)
{
  GST_OBJECT_LOCK (decoder);
  if (min_latency)
    *min_latency = decoder->priv->min_latency;
  if (max_latency)
    *max_latency = decoder->priv->max_latency;
  GST_OBJECT_UNLOCK (decoder);
}

/**
 * gst_aml_video_decoder_merge_tags:
 * @decoder: a #GstAmlVideoDecoder
 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
 *     previously-set tags
 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
 *
 * Sets the video decoder tags and how they should be merged with any
 * upstream stream tags. This will override any tags previously-set
 * with gst_aml_video_decoder_merge_tags().
 *
 * Note that this is provided for convenience, and the subclass is
 * not required to use this and can still do tag handling on its own.
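 *
 * A small usage sketch, merging a bitrate tag (the value is made up for
 * illustration):
 * |[<!-- language="C" -->
 * GstTagList *tags = gst_tag_list_new_empty ();
 *
 * gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE, GST_TAG_BITRATE,
 *     (guint) 4000000, NULL);
 * gst_aml_video_decoder_merge_tags (decoder, tags, GST_TAG_MERGE_REPLACE);
 * gst_tag_list_unref (tags);
 * ]|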
 *
 * MT safe.
 */
void
gst_aml_video_decoder_merge_tags (GstAmlVideoDecoder * decoder,
    const GstTagList * tags, GstTagMergeMode mode)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (decoder));
  g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
  g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);

  GST_AML_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->tags != tags) {
    if (decoder->priv->tags) {
      gst_tag_list_unref (decoder->priv->tags);
      decoder->priv->tags = NULL;
      decoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    }
    if (tags) {
      decoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
      decoder->priv->tags_merge_mode = mode;
    }

    GST_DEBUG_OBJECT (decoder, "set decoder tags to %" GST_PTR_FORMAT, tags);
    decoder->priv->tags_changed = TRUE;
  }
  GST_AML_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}

/**
 * gst_aml_video_decoder_get_buffer_pool:
 * @decoder: a #GstAmlVideoDecoder
 *
 * Returns: (transfer full): the instance of the #GstBufferPool used
 *     by the decoder; unref it after use
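 *
 * For example:
 * |[<!-- language="C" -->
 * GstBufferPool *pool = gst_aml_video_decoder_get_buffer_pool (decoder);
 *
 * if (pool != NULL) {
 *   // ... inspect or use the pool here ...
 *   gst_object_unref (pool);
 * }
 * ]|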
 */
GstBufferPool *
gst_aml_video_decoder_get_buffer_pool (GstAmlVideoDecoder * decoder)
{
  g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (decoder), NULL);

  if (decoder->priv->pool)
    return gst_object_ref (decoder->priv->pool);

  return NULL;
}

/**
 * gst_aml_video_decoder_get_allocator:
 * @decoder: a #GstAmlVideoDecoder
 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
 *     used
 * @params: (out) (allow-none) (transfer full): the
 *     #GstAllocationParams of @allocator
 *
 * Lets #GstAmlVideoDecoder sub-classes know the memory @allocator
 * used by the base class and its @params.
 *
 * Unref the @allocator after use.
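 *
 * Typical usage (a sketch; note that @allocator may come back as %NULL,
 * meaning the default allocator):
 * |[<!-- language="C" -->
 * GstAllocator *allocator = NULL;
 * GstAllocationParams params;
 *
 * gst_aml_video_decoder_get_allocator (decoder, &allocator, &params);
 * // ... allocate memory with gst_allocator_alloc (allocator, size, &params) ...
 * if (allocator != NULL)
 *   gst_object_unref (allocator);
 * ]|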
 */
void
gst_aml_video_decoder_get_allocator (GstAmlVideoDecoder * decoder,
    GstAllocator ** allocator, GstAllocationParams * params)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (decoder));

  if (allocator)
    *allocator = decoder->priv->allocator ?
        gst_object_ref (decoder->priv->allocator) : NULL;

  if (params)
    *params = decoder->priv->params;
}

/**
 * gst_aml_video_decoder_set_use_default_pad_acceptcaps:
 * @decoder: a #GstAmlVideoDecoder
 * @use: if the default pad accept-caps query handling should be used
 *
 * Lets #GstAmlVideoDecoder sub-classes decide if they want the sink pad
 * to use the default pad query handler to reply to accept-caps queries.
 *
 * By setting this to TRUE it is possible to further customize the default
 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
 * %GST_PAD_SET_ACCEPT_TEMPLATE.
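 *
 * For example, from a subclass instance-init function (a sketch):
 * |[<!-- language="C" -->
 * gst_aml_video_decoder_set_use_default_pad_acceptcaps (decoder, TRUE);
 *
 * // Only check against the pad template caps in accept-caps handling
 * GST_PAD_SET_ACCEPT_TEMPLATE (decoder->sinkpad);
 * ]|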
 *
 * Since: 1.6
 */
void
gst_aml_video_decoder_set_use_default_pad_acceptcaps (GstAmlVideoDecoder * decoder,
    gboolean use)
{
  decoder->priv->use_default_pad_acceptcaps = use;
}

static void
gst_aml_video_decoder_request_sync_point_internal (GstAmlVideoDecoder * dec,
    GstClockTime deadline, GstAmlVideoDecoderRequestSyncPointFlags flags)
{
  GstEvent *fku = NULL;
  GstAmlVideoDecoderPrivate *priv;

  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));

  priv = dec->priv;

  GST_OBJECT_LOCK (dec);

  /* Check if we're allowed to send a new force-keyunit event.
   * frame->deadline is set to the running time of the PTS. */
  if (priv->min_force_key_unit_interval == 0 ||
      deadline == GST_CLOCK_TIME_NONE ||
      (priv->min_force_key_unit_interval != GST_CLOCK_TIME_NONE &&
          (priv->last_force_key_unit_time == GST_CLOCK_TIME_NONE
              || (priv->last_force_key_unit_time +
                  priv->min_force_key_unit_interval <= deadline)))) {
    GST_DEBUG_OBJECT (dec,
        "Requesting a new key-unit for frame with deadline %" GST_TIME_FORMAT,
        GST_TIME_ARGS (deadline));
    fku =
        gst_video_event_new_upstream_force_key_unit (GST_CLOCK_TIME_NONE, FALSE,
        0);
    priv->last_force_key_unit_time = deadline;
  } else {
    GST_DEBUG_OBJECT (dec,
        "Can't request a new key-unit for frame with deadline %"
        GST_TIME_FORMAT, GST_TIME_ARGS (deadline));
  }
  priv->request_sync_point_flags |= flags;
  /* We don't know the frame number of the sync point yet, so set it to a
   * frame number higher than any allowed frame number */
  priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_PENDING;
  GST_OBJECT_UNLOCK (dec);

  if (fku)
    gst_pad_push_event (dec->sinkpad, fku);
}

/**
 * gst_aml_video_decoder_request_sync_point:
 * @dec: a #GstAmlVideoDecoder
 * @frame: a #GstAmlVideoCodecFrame
 * @flags: #GstAmlVideoDecoderRequestSyncPointFlags
 *
 * Allows the #GstAmlVideoDecoder subclass to request from the base class that
 * a new sync point should be requested from upstream, and that @frame was the
 * frame when the subclass noticed that a new sync point is required. A reason
 * for the subclass to do this could be missing reference frames, for example.
 *
 * The base class will then request a new sync point from upstream as long as
 * the time that passed since the last one exceeds
 * #GstAmlVideoDecoder:min-force-key-unit-interval.
 *
 * The subclass can signal via @flags how the frames until the next sync point
 * should be handled (see the sketch after this list):
 *
 * * If %GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT is selected then
 * all following input frames until the next sync point are discarded.
 * This can be useful if the lack of a sync point will prevent all further
 * decoding and the decoder implementation is not very robust in handling
 * missing reference frames.
 * * If %GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT is selected
 * then all output frames following @frame are marked as corrupted via
 * %GST_BUFFER_FLAG_CORRUPTED. Corrupted frames can be automatically
 * dropped by the base class, see #GstAmlVideoDecoder:discard-corrupted-frames.
 * Subclasses can manually mark frames as corrupted via %GST_AML_VIDEO_CODEC_FRAME_FLAG_CORRUPTED
 * before calling gst_aml_video_decoder_finish_frame().
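 *
 * A sketch of how a subclass might react to a missing reference frame from
 * within its @handle_frame implementation (have_reference_frame is a
 * placeholder condition, and dropping the frame afterwards is just one
 * possible policy):
 * |[<!-- language="C" -->
 * if (!have_reference_frame) {
 *   // Discard everything until upstream delivers a new sync point
 *   gst_aml_video_decoder_request_sync_point (decoder, frame,
 *       GST_AML_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT);
 *   return gst_aml_video_decoder_drop_frame (decoder, frame);
 * }
 * ]|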
 *
 * Since: 1.20
 */
void
gst_aml_video_decoder_request_sync_point (GstAmlVideoDecoder * dec,
    GstAmlVideoCodecFrame * frame, GstAmlVideoDecoderRequestSyncPointFlags flags)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));
  g_return_if_fail (frame != NULL);

  gst_aml_video_decoder_request_sync_point_internal (dec, frame->deadline, flags);
}

/**
 * gst_aml_video_decoder_set_needs_sync_point:
 * @dec: a #GstAmlVideoDecoder
 * @enabled: new state
 *
 * Configures whether the decoder requires a sync point before it starts
 * outputting data in the beginning. If enabled, the base class will discard
 * all non-sync point frames in the beginning and after a flush and will not
 * pass them to the subclass.
 *
 * If the first frame is not a sync point, the base class will request a sync
 * point via the force-key-unit event.
 *
 * Since: 1.20
 */
void
gst_aml_video_decoder_set_needs_sync_point (GstAmlVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_AML_VIDEO_DECODER (dec));

  dec->priv->needs_sync_point = enabled;
}

/**
 * gst_aml_video_decoder_get_needs_sync_point:
 * @dec: a #GstAmlVideoDecoder
 *
 * Queries if the decoder requires a sync point before it starts outputting
 * data in the beginning.
 *
 * Returns: %TRUE if a sync point is required in the beginning.
 *
 * Since: 1.20
 */
gboolean
gst_aml_video_decoder_get_needs_sync_point (GstAmlVideoDecoder * dec)
{
  gboolean result;

  g_return_val_if_fail (GST_IS_AML_VIDEO_DECODER (dec), FALSE);

  result = dec->priv->needs_sync_point;

  return result;
}