/src/gstreamer/subprojects/gst-plugins-base/gst-libs/gst/audio/gstaudiodecoder.c
Line | Count | Source |
1 | | /* GStreamer |
2 | | * Copyright (C) 2009 Igalia S.L. |
3 | | * Author: Iago Toral Quiroga <itoral@igalia.com> |
4 | | * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>. |
5 | | * Copyright (C) 2011 Nokia Corporation. All rights reserved. |
6 | | * Contact: Stefan Kost <stefan.kost@nokia.com> |
7 | | * |
8 | | * This library is free software; you can redistribute it and/or |
9 | | * modify it under the terms of the GNU Library General Public |
10 | | * License as published by the Free Software Foundation; either |
11 | | * version 2 of the License, or (at your option) any later version. |
12 | | * |
13 | | * This library is distributed in the hope that it will be useful, |
14 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | | * Library General Public License for more details. |
17 | | * |
18 | | * You should have received a copy of the GNU Library General Public |
19 | | * License along with this library; if not, write to the |
20 | | * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, |
21 | | * Boston, MA 02110-1301, USA. |
22 | | */ |
23 | | |
24 | | /** |
25 | | * SECTION:gstaudiodecoder |
26 | | * @title: GstAudioDecoder |
27 | | * @short_description: Base class for audio decoders |
28 | | * @see_also: #GstBaseTransform |
29 | | * |
30 | | * This base class is for audio decoders turning encoded data into |
31 | | * raw audio samples. |
32 | | * |
33 | | * GstAudioDecoder and subclass should cooperate as follows. |
34 | | * |
35 | | * ## Configuration |
36 | | * |
37 | | * * Initially, GstAudioDecoder calls @start when the decoder element |
38 | | * is activated, which allows subclass to perform any global setup. |
39 | | * Base class (context) parameters can already be set according to subclass |
40 | | * capabilities (or possibly upon receiving more information in a |
41 | | * subsequent @set_format). |
42 | | * * GstAudioDecoder calls @set_format to inform subclass of the format |
43 | | * of input audio data that it is about to receive. |
44 | | * While unlikely, it might be called more than once if changed input |
45 | | * parameters require reconfiguration. |
46 | | * * GstAudioDecoder calls @stop at end of all processing. |
47 | | * |
48 | | * From the configuration stage onward, and throughout processing, GstAudioDecoder |
49 | | * provides various (context) parameters, e.g. describing the format of the |
50 | | * output audio data (valid once output caps have been set) or the current parsing state. |
51 | | * Conversely, the subclass can and should configure the context to inform the |
52 | | * base class of its expectations w.r.t. buffer handling. |
53 | | * |
54 | | * ## Data processing |
55 | | * * Base class gathers input data, and optionally allows subclass |
56 | | * to parse this into manageable (as defined by the subclass) |
57 | | * chunks. Such chunks are subsequently referred to as 'frames', |
58 | | * though they may or may not correspond to one (or more) audio format frames. |
59 | | * * Input frame is provided to subclass' @handle_frame. |
60 | | * * If codec processing results in decoded data, subclass should call |
61 | | * @gst_audio_decoder_finish_frame to have decoded data pushed |
62 | | * downstream. |
63 | | * * Just prior to actually pushing a buffer downstream, |
64 | | * it is passed to @pre_push. Subclass should either use this callback |
65 | | * to arrange for additional downstream pushing or otherwise ensure that such |
66 | | * custom pushing occurs after at least one method call has finished since |
67 | | * the src pad caps were set. |
68 | | * * During the parsing process GstAudioDecoderClass will handle both |
69 | | * srcpad and sinkpad events. Sink events will be passed to subclass |
70 | | * if @event callback has been provided. |
71 | | * |
72 | | * ## Shutdown phase |
73 | | * |
74 | | * * GstAudioDecoder class calls @stop to inform the subclass that data |
75 | | * parsing will be stopped. |
76 | | * |
77 | | * Subclass is responsible for providing pad template caps for |
78 | | * source and sink pads. The pads need to be named "sink" and "src". It also |
79 | | * needs to set fixed caps on the srcpad once the format is known. This is |
80 | | * typically done when the base class calls the subclass' @set_format function, |
81 | | * though it might be delayed until calling @gst_audio_decoder_finish_frame. |
82 | | * |
83 | | * In summary, the above process should have the subclass concentrating on |
84 | | * codec data processing while leaving other matters, most notably timestamp |
85 | | * handling, to the base class. While the subclass may exert more control in |
86 | | * this area (see e.g. @pre_push), doing so is very much not recommended. |
87 | | * |
88 | | * In particular, the base class will try to arrange for perfect output timestamps |
89 | | * as much as possible while tracking upstream timestamps. |
90 | | * To this end, if the deviation between the next ideal expected perfect timestamp |
91 | | * and the upstream one exceeds #GstAudioDecoder:tolerance, then a resync to upstream |
92 | | * occurs (which always happens if the tolerance mechanism is disabled). |
93 | | * |
94 | | * In non-live pipelines, the base class can also (configurably) arrange for |
95 | | * output buffer aggregation, which may help to reduce large(r) numbers of |
96 | | * small(er) buffers being pushed and processed downstream. Note that this |
97 | | * feature is only available if the buffer layout is interleaved. For planar |
98 | | * buffers, the decoder implementation is fully responsible for the output |
99 | | * buffer size. |
100 | | * |
101 | | * On the other hand, it should be noted that the base class only provides limited |
102 | | * seeking support (upon explicit subclass request), as full-fledged support |
103 | | * should rather be left to an upstream demuxer, parser or the like. This simple |
104 | | * approach caters for seeking and duration reporting using estimated input |
105 | | * bitrates. |
106 | | * |
107 | | * Things that the subclass needs to take care of: |
108 | | * |
109 | | * * Provide pad templates |
110 | | * * Set source pad caps when appropriate |
111 | | * * Set user-configurable properties to sane defaults for format and |
112 | | * implementing codec at hand, and convey some subclass capabilities and |
113 | | * expectations in context. |
114 | | * |
115 | | * * Accept data in @handle_frame and provide decoded results to |
116 | | * @gst_audio_decoder_finish_frame. If it is prepared to perform |
117 | | * PLC, it should also accept NULL data in @handle_frame and provide |
118 | | * data for the indicated duration (a minimal sketch follows this comment). |
119 | | * |
120 | | */ |
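
To make the cooperation described above concrete, here is a minimal sketch of a hypothetical subclass. The MyDec type, the my_dec_decode() helper and the fixed S16/48 kHz/stereo output format are illustrative assumptions only and do not appear in this file:

/* Minimal illustrative GstAudioDecoder subclass (hypothetical MyDec). */
static gboolean
my_dec_set_format (GstAudioDecoder * dec, GstCaps * caps)
{
  GstAudioInfo info;

  /* configure the output format once the input caps are known */
  gst_audio_info_set_format (&info, GST_AUDIO_FORMAT_S16, 48000, 2, NULL);
  return gst_audio_decoder_set_output_format (dec, &info);
}

static GstFlowReturn
my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * inbuf)
{
  GstBuffer *outbuf;

  /* NULL input signals draining or a PLC request */
  if (inbuf == NULL)
    return gst_audio_decoder_finish_frame (dec, NULL, 1);

  /* my_dec_decode() stands in for the actual codec call */
  outbuf = my_dec_decode (dec, inbuf);
  if (outbuf == NULL)
    return GST_FLOW_ERROR;

  /* hand decoded data to the base class, which timestamps and pushes it */
  return gst_audio_decoder_finish_frame (dec, outbuf, 1);
}

static void
my_dec_class_init (MyDecClass * klass)
{
  GstAudioDecoderClass *adclass = GST_AUDIO_DECODER_CLASS (klass);

  /* "sink" and "src" pad templates would also be added to the element class here */
  adclass->set_format = GST_DEBUG_FUNCPTR (my_dec_set_format);
  adclass->handle_frame = GST_DEBUG_FUNCPTR (my_dec_handle_frame);
}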
121 | | |
122 | | #ifdef HAVE_CONFIG_H |
123 | | #include "config.h" |
124 | | #endif |
125 | | |
126 | | #include "gstaudiodecoder.h" |
127 | | #include "gstaudioutilsprivate.h" |
128 | | #include <gst/pbutils/descriptions.h> |
129 | | |
130 | | #include <string.h> |
131 | | |
132 | | GST_DEBUG_CATEGORY (audiodecoder_debug); |
133 | | #define GST_CAT_DEFAULT audiodecoder_debug |
134 | | |
135 | | enum |
136 | | { |
137 | | LAST_SIGNAL |
138 | | }; |
139 | | |
140 | | enum |
141 | | { |
142 | | PROP_0, |
143 | | PROP_LATENCY, |
144 | | PROP_TOLERANCE, |
145 | | PROP_PLC, |
146 | | PROP_MAX_ERRORS |
147 | | }; |
148 | | |
149 | 88 | #define DEFAULT_LATENCY 0 |
150 | 88 | #define DEFAULT_TOLERANCE 0 |
151 | 88 | #define DEFAULT_PLC FALSE |
152 | 86 | #define DEFAULT_DRAINABLE TRUE |
153 | 86 | #define DEFAULT_NEEDS_FORMAT FALSE |
154 | 2 | #define DEFAULT_MAX_ERRORS GST_AUDIO_DECODER_MAX_ERRORS |
155 | | |
156 | | typedef struct _GstAudioDecoderContext |
157 | | { |
158 | | /* last negotiated input caps */ |
159 | | GstCaps *input_caps; |
160 | | |
161 | | /* (output) audio format */ |
162 | | GstAudioInfo info; |
163 | | GstCaps *caps; |
164 | | gboolean output_format_changed; |
165 | | |
166 | | /* parsing state */ |
167 | | gboolean eos; |
168 | | gboolean sync; |
169 | | |
170 | | gboolean had_output_data; |
171 | | gboolean had_input_data; |
172 | | |
173 | | /* misc */ |
174 | | gint delay; |
175 | | |
176 | | /* output */ |
177 | | gboolean do_plc; |
178 | | gboolean do_estimate_rate; |
179 | | GstCaps *allocation_caps; |
180 | | /* MT-protected (with LOCK) */ |
181 | | GstClockTime min_latency; |
182 | | GstClockTime max_latency; |
183 | | /* Tracks whether the latency message was posted at least once */ |
184 | | gboolean posted_latency_msg; |
185 | | |
186 | | GstAllocator *allocator; |
187 | | GstAllocationParams params; |
188 | | } GstAudioDecoderContext; |
189 | | |
190 | | struct _GstAudioDecoderPrivate |
191 | | { |
192 | | /* activation status */ |
193 | | gboolean active; |
194 | | |
195 | | /* input base/first ts as basis for output ts */ |
196 | | GstClockTime base_ts; |
197 | | /* input samples processed and sent downstream so far (w.r.t. base_ts) */ |
198 | | guint64 samples; |
199 | | |
200 | | /* collected input data */ |
201 | | GstAdapter *adapter; |
202 | | /* tracking input ts for changes */ |
203 | | GstClockTime prev_ts; |
204 | | guint64 prev_distance; |
205 | | /* frames obtained from input */ |
206 | | GQueue frames; |
207 | | /* collected output data */ |
208 | | GstAdapter *adapter_out; |
209 | | /* ts and duration for output data collected above */ |
210 | | GstClockTime out_ts, out_dur; |
211 | | /* mark outgoing discont */ |
212 | | gboolean discont; |
213 | | |
214 | | /* subclass gave all it could already */ |
215 | | gboolean drained; |
216 | | /* subclass currently being forcibly drained */ |
217 | | gboolean force; |
218 | | /* whether input_segment and output_segment are identical */ |
219 | | gboolean in_out_segment_sync; |
220 | | /* TRUE if we have an active set of instant rate flags */ |
221 | | gboolean decode_flags_override; |
222 | | GstSegmentFlags decode_flags; |
223 | | |
224 | | /* expecting the buffer with DISCONT flag */ |
225 | | gboolean expecting_discont_buf; |
226 | | |
227 | | /* number of samples pushed out via _finish_subframe(), resets on _finish_frame() */ |
228 | | guint subframe_samples; |
229 | | |
230 | | /* input bps estimation */ |
231 | | /* global in bytes seen */ |
232 | | guint64 bytes_in; |
233 | | /* global samples sent out */ |
234 | | guint64 samples_out; |
235 | | /* bytes flushed during parsing */ |
236 | | guint sync_flush; |
237 | | /* error count */ |
238 | | gint error_count; |
239 | | /* max errors */ |
240 | | gint max_errors; |
241 | | |
242 | | /* upstream stream tags (global tags are passed through as-is) */ |
243 | | GstTagList *upstream_tags; |
244 | | |
245 | | /* subclass tags */ |
246 | | GstTagList *taglist; /* FIXME: rename to decoder_tags */ |
247 | | GstTagMergeMode decoder_tags_merge_mode; |
248 | | |
249 | | gboolean taglist_changed; /* FIXME: rename to tags_changed */ |
250 | | |
251 | | /* whether circumstances allow output aggregation */ |
252 | | gint agg; |
253 | | |
254 | | /* reverse playback queues */ |
255 | | /* collect input */ |
256 | | GList *gather; |
257 | | /* to-be-decoded */ |
258 | | GList *decode; |
259 | | /* reversed output */ |
260 | | GList *queued; |
261 | | |
262 | | /* context storage */ |
263 | | GstAudioDecoderContext ctx; |
264 | | |
265 | | /* properties */ |
266 | | GstClockTime latency; |
267 | | GstClockTime tolerance; |
268 | | gboolean plc; |
269 | | gboolean drainable; |
270 | | gboolean needs_format; |
271 | | |
272 | | /* pending serialized sink events, will be sent from finish_frame() */ |
273 | | GList *pending_events; |
274 | | |
275 | | /* flags */ |
276 | | gboolean use_default_pad_acceptcaps; |
277 | | }; |
278 | | |
279 | | /* cached quark to avoid contention on the global quark table lock */ |
280 | | #define META_TAG_AUDIO meta_tag_audio_quark |
281 | | static GQuark meta_tag_audio_quark; |
282 | | |
283 | | static void gst_audio_decoder_finalize (GObject * object); |
284 | | static void gst_audio_decoder_set_property (GObject * object, |
285 | | guint prop_id, const GValue * value, GParamSpec * pspec); |
286 | | static void gst_audio_decoder_get_property (GObject * object, |
287 | | guint prop_id, GValue * value, GParamSpec * pspec); |
288 | | |
289 | | static void gst_audio_decoder_clear_queues (GstAudioDecoder * dec); |
290 | | static GstFlowReturn gst_audio_decoder_chain_reverse (GstAudioDecoder * |
291 | | dec, GstBuffer * buf); |
292 | | |
293 | | static GstStateChangeReturn gst_audio_decoder_change_state (GstElement * |
294 | | element, GstStateChange transition); |
295 | | static gboolean gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, |
296 | | GstEvent * event); |
297 | | static gboolean gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec, |
298 | | GstEvent * event); |
299 | | static gboolean gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent, |
300 | | GstEvent * event); |
301 | | static gboolean gst_audio_decoder_src_event (GstPad * pad, GstObject * parent, |
302 | | GstEvent * event); |
303 | | static gboolean gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec, |
304 | | GstCaps * caps); |
305 | | static GstFlowReturn gst_audio_decoder_chain (GstPad * pad, GstObject * parent, |
306 | | GstBuffer * buf); |
307 | | static gboolean gst_audio_decoder_src_query (GstPad * pad, GstObject * parent, |
308 | | GstQuery * query); |
309 | | static gboolean gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent, |
310 | | GstQuery * query); |
311 | | static void gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full); |
312 | | |
313 | | static gboolean gst_audio_decoder_decide_allocation_default (GstAudioDecoder * |
314 | | dec, GstQuery * query); |
315 | | static gboolean gst_audio_decoder_propose_allocation_default (GstAudioDecoder * |
316 | | dec, GstQuery * query); |
317 | | static gboolean gst_audio_decoder_negotiate_default (GstAudioDecoder * dec); |
318 | | static gboolean gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec); |
319 | | static gboolean gst_audio_decoder_handle_gap (GstAudioDecoder * dec, |
320 | | GstEvent * event); |
321 | | static gboolean gst_audio_decoder_sink_query_default (GstAudioDecoder * dec, |
322 | | GstQuery * query); |
323 | | static gboolean gst_audio_decoder_src_query_default (GstAudioDecoder * dec, |
324 | | GstQuery * query); |
325 | | |
326 | | static gboolean gst_audio_decoder_transform_meta_default (GstAudioDecoder * |
327 | | decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf); |
328 | | |
329 | | static GstFlowReturn |
330 | | gst_audio_decoder_finish_frame_or_subframe (GstAudioDecoder * dec, |
331 | | GstBuffer * buf, gint frames); |
332 | | |
333 | | static GstElementClass *parent_class = NULL; |
334 | | static gint private_offset = 0; |
335 | | |
336 | | static void gst_audio_decoder_class_init (GstAudioDecoderClass * klass); |
337 | | static void gst_audio_decoder_init (GstAudioDecoder * dec, |
338 | | GstAudioDecoderClass * klass); |
339 | | |
340 | | GType |
341 | | gst_audio_decoder_get_type (void) |
342 | 767 | { |
343 | 767 | static gsize audio_decoder_type = 0; |
344 | | |
345 | 767 | if (g_once_init_enter (&audio_decoder_type)) { |
346 | 2 | GType _type; |
347 | 2 | static const GTypeInfo audio_decoder_info = { |
348 | 2 | sizeof (GstAudioDecoderClass), |
349 | 2 | NULL, |
350 | 2 | NULL, |
351 | 2 | (GClassInitFunc) gst_audio_decoder_class_init, |
352 | 2 | NULL, |
353 | 2 | NULL, |
354 | 2 | sizeof (GstAudioDecoder), |
355 | 2 | 0, |
356 | 2 | (GInstanceInitFunc) gst_audio_decoder_init, |
357 | 2 | }; |
358 | | |
359 | 2 | _type = g_type_register_static (GST_TYPE_ELEMENT, |
360 | 2 | "GstAudioDecoder", &audio_decoder_info, G_TYPE_FLAG_ABSTRACT); |
361 | | |
362 | 2 | private_offset = |
363 | 2 | g_type_add_instance_private (_type, sizeof (GstAudioDecoderPrivate)); |
364 | | |
365 | 2 | g_once_init_leave (&audio_decoder_type, _type); |
366 | 2 | } |
367 | 767 | return audio_decoder_type; |
368 | 767 | } |
369 | | |
370 | | static inline GstAudioDecoderPrivate * |
371 | | gst_audio_decoder_get_instance_private (GstAudioDecoder * self) |
372 | 86 | { |
373 | 86 | return (G_STRUCT_MEMBER_P (self, private_offset)); |
374 | 86 | } |
375 | | |
376 | | static void |
377 | | gst_audio_decoder_class_init (GstAudioDecoderClass * klass) |
378 | 2 | { |
379 | 2 | GObjectClass *gobject_class; |
380 | 2 | GstElementClass *element_class; |
381 | 2 | GstAudioDecoderClass *audiodecoder_class; |
382 | | |
383 | 2 | gobject_class = G_OBJECT_CLASS (klass); |
384 | 2 | element_class = GST_ELEMENT_CLASS (klass); |
385 | 2 | audiodecoder_class = GST_AUDIO_DECODER_CLASS (klass); |
386 | | |
387 | 2 | parent_class = g_type_class_peek_parent (klass); |
388 | | |
389 | 2 | if (private_offset != 0) |
390 | 2 | g_type_class_adjust_private_offset (klass, &private_offset); |
391 | | |
392 | 2 | GST_DEBUG_CATEGORY_INIT (audiodecoder_debug, "audiodecoder", 0, |
393 | 2 | "audio decoder base class"); |
394 | | |
395 | 2 | gobject_class->set_property = gst_audio_decoder_set_property; |
396 | 2 | gobject_class->get_property = gst_audio_decoder_get_property; |
397 | 2 | gobject_class->finalize = gst_audio_decoder_finalize; |
398 | | |
399 | 2 | element_class->change_state = |
400 | 2 | GST_DEBUG_FUNCPTR (gst_audio_decoder_change_state); |
401 | | |
402 | | /* Properties */ |
403 | 2 | g_object_class_install_property (gobject_class, PROP_LATENCY, |
404 | 2 | g_param_spec_int64 ("min-latency", "Minimum Latency", |
405 | 2 | "Aggregate output data to a minimum of latency time (ns)", |
406 | 2 | 0, G_MAXINT64, DEFAULT_LATENCY, |
407 | 2 | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); |
408 | | |
409 | 2 | g_object_class_install_property (gobject_class, PROP_TOLERANCE, |
410 | 2 | g_param_spec_int64 ("tolerance", "Tolerance", |
411 | 2 | "Perfect ts while timestamp jitter/imperfection within tolerance (ns)", |
412 | 2 | 0, G_MAXINT64, DEFAULT_TOLERANCE, |
413 | 2 | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); |
414 | | |
415 | 2 | g_object_class_install_property (gobject_class, PROP_PLC, |
416 | 2 | g_param_spec_boolean ("plc", "Packet Loss Concealment", |
417 | 2 | "Perform packet loss concealment (if supported)", |
418 | 2 | DEFAULT_PLC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); |
419 | | |
420 | | /** |
421 | | * GstAudioDecoder:max-errors: |
422 | | * |
423 | | * Maximum number of tolerated consecutive decode errors. See |
424 | | * gst_audio_decoder_set_max_errors() for more details. |
425 | | * |
426 | | * Since: 1.18 |
427 | | */ |
428 | 2 | g_object_class_install_property (gobject_class, PROP_MAX_ERRORS, |
429 | 2 | g_param_spec_int ("max-errors", "Max errors", |
430 | 2 | "Max consecutive decoder errors before returning flow error", |
431 | 2 | -1, G_MAXINT, DEFAULT_MAX_ERRORS, |
432 | 2 | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); |
433 | | |
434 | 2 | audiodecoder_class->sink_event = |
435 | 2 | GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_eventfunc); |
436 | 2 | audiodecoder_class->src_event = |
437 | 2 | GST_DEBUG_FUNCPTR (gst_audio_decoder_src_eventfunc); |
438 | 2 | audiodecoder_class->propose_allocation = |
439 | 2 | GST_DEBUG_FUNCPTR (gst_audio_decoder_propose_allocation_default); |
440 | 2 | audiodecoder_class->decide_allocation = |
441 | 2 | GST_DEBUG_FUNCPTR (gst_audio_decoder_decide_allocation_default); |
442 | 2 | audiodecoder_class->negotiate = |
443 | 2 | GST_DEBUG_FUNCPTR (gst_audio_decoder_negotiate_default); |
444 | 2 | audiodecoder_class->sink_query = |
445 | 2 | GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query_default); |
446 | 2 | audiodecoder_class->src_query = |
447 | 2 | GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query_default); |
448 | 2 | audiodecoder_class->transform_meta = |
449 | 2 | GST_DEBUG_FUNCPTR (gst_audio_decoder_transform_meta_default); |
450 | | |
451 | 2 | meta_tag_audio_quark = g_quark_from_static_string (GST_META_TAG_AUDIO_STR); |
452 | 2 | } |
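
The properties installed in the class_init above are tuned from the application side. A small hedged example; 'dec' is assumed to be an instance of some GstAudioDecoder subclass obtained elsewhere:

/* Hypothetical application-side tuning of the properties installed above. */
g_object_set (dec,
    "plc", TRUE,                    /* conceal packet loss if supported */
    "tolerance", 20 * GST_MSECOND,  /* tolerate 20 ms of timestamp jitter */
    "max-errors", 10,               /* give up after 10 consecutive decode errors */
    NULL);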
453 | | |
454 | | static void |
455 | | gst_audio_decoder_init (GstAudioDecoder * dec, GstAudioDecoderClass * klass) |
456 | 86 | { |
457 | 86 | GstPadTemplate *pad_template; |
458 | | |
459 | 86 | GST_DEBUG_OBJECT (dec, "gst_audio_decoder_init"); |
460 | | |
461 | 86 | dec->priv = gst_audio_decoder_get_instance_private (dec); |
462 | | |
463 | | /* Setup sink pad */ |
464 | 86 | pad_template = |
465 | 86 | gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink"); |
466 | 86 | g_return_if_fail (pad_template != NULL); |
467 | | |
468 | 86 | dec->sinkpad = gst_pad_new_from_template (pad_template, "sink"); |
469 | 86 | gst_pad_set_event_function (dec->sinkpad, |
470 | 86 | GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_event)); |
471 | 86 | gst_pad_set_chain_function (dec->sinkpad, |
472 | 86 | GST_DEBUG_FUNCPTR (gst_audio_decoder_chain)); |
473 | 86 | gst_pad_set_query_function (dec->sinkpad, |
474 | 86 | GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query)); |
475 | 86 | gst_element_add_pad (GST_ELEMENT (dec), dec->sinkpad); |
476 | 86 | GST_DEBUG_OBJECT (dec, "sinkpad created"); |
477 | | |
478 | | /* Setup source pad */ |
479 | 86 | pad_template = |
480 | 86 | gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src"); |
481 | 86 | g_return_if_fail (pad_template != NULL); |
482 | | |
483 | 86 | dec->srcpad = gst_pad_new_from_template (pad_template, "src"); |
484 | 86 | gst_pad_set_event_function (dec->srcpad, |
485 | 86 | GST_DEBUG_FUNCPTR (gst_audio_decoder_src_event)); |
486 | 86 | gst_pad_set_query_function (dec->srcpad, |
487 | 86 | GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query)); |
488 | 86 | gst_element_add_pad (GST_ELEMENT (dec), dec->srcpad); |
489 | 86 | GST_DEBUG_OBJECT (dec, "srcpad created"); |
490 | | |
491 | 86 | dec->priv->adapter = gst_adapter_new (); |
492 | 86 | dec->priv->adapter_out = gst_adapter_new (); |
493 | 86 | g_queue_init (&dec->priv->frames); |
494 | | |
495 | 86 | g_rec_mutex_init (&dec->stream_lock); |
496 | | |
497 | | /* property default */ |
498 | 86 | dec->priv->latency = DEFAULT_LATENCY; |
499 | 86 | dec->priv->tolerance = DEFAULT_TOLERANCE; |
500 | 86 | dec->priv->plc = DEFAULT_PLC; |
501 | 86 | dec->priv->drainable = DEFAULT_DRAINABLE; |
502 | 86 | dec->priv->needs_format = DEFAULT_NEEDS_FORMAT; |
503 | 86 | dec->priv->max_errors = GST_AUDIO_DECODER_MAX_ERRORS; |
504 | | |
505 | | /* init state */ |
506 | 86 | dec->priv->ctx.min_latency = 0; |
507 | 86 | dec->priv->ctx.max_latency = 0; |
508 | 86 | gst_audio_decoder_reset (dec, TRUE); |
509 | 86 | GST_DEBUG_OBJECT (dec, "init ok"); |
510 | 86 | } |
511 | | |
512 | | static void |
513 | | gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full) |
514 | 811 | { |
515 | 811 | GST_DEBUG_OBJECT (dec, "gst_audio_decoder_reset"); |
516 | | |
517 | 811 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
518 | | |
519 | 811 | if (full) { |
520 | 258 | dec->priv->active = FALSE; |
521 | 258 | GST_OBJECT_LOCK (dec); |
522 | 258 | dec->priv->bytes_in = 0; |
523 | 258 | dec->priv->samples_out = 0; |
524 | 258 | GST_OBJECT_UNLOCK (dec); |
525 | 258 | dec->priv->agg = -1; |
526 | 258 | dec->priv->error_count = 0; |
527 | 258 | gst_audio_decoder_clear_queues (dec); |
528 | | |
529 | 258 | if (dec->priv->taglist) { |
530 | 72 | gst_tag_list_unref (dec->priv->taglist); |
531 | 72 | dec->priv->taglist = NULL; |
532 | 72 | } |
533 | 258 | dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL; |
534 | 258 | if (dec->priv->upstream_tags) { |
535 | 72 | gst_tag_list_unref (dec->priv->upstream_tags); |
536 | 72 | dec->priv->upstream_tags = NULL; |
537 | 72 | } |
538 | 258 | dec->priv->taglist_changed = FALSE; |
539 | | |
540 | 258 | gst_segment_init (&dec->input_segment, GST_FORMAT_TIME); |
541 | 258 | gst_segment_init (&dec->output_segment, GST_FORMAT_TIME); |
542 | 258 | dec->priv->in_out_segment_sync = TRUE; |
543 | | |
544 | 258 | g_list_foreach (dec->priv->pending_events, (GFunc) gst_event_unref, NULL); |
545 | 258 | g_list_free (dec->priv->pending_events); |
546 | 258 | dec->priv->pending_events = NULL; |
547 | | |
548 | 258 | if (dec->priv->ctx.allocator) |
549 | 0 | gst_object_unref (dec->priv->ctx.allocator); |
550 | | |
551 | 258 | GST_OBJECT_LOCK (dec); |
552 | 258 | dec->priv->decode_flags_override = FALSE; |
553 | 258 | gst_caps_replace (&dec->priv->ctx.input_caps, NULL); |
554 | 258 | gst_caps_replace (&dec->priv->ctx.caps, NULL); |
555 | 258 | gst_caps_replace (&dec->priv->ctx.allocation_caps, NULL); |
556 | | |
557 | 258 | memset (&dec->priv->ctx, 0, sizeof (dec->priv->ctx)); |
558 | | |
559 | 258 | gst_audio_info_init (&dec->priv->ctx.info); |
560 | 258 | dec->priv->ctx.posted_latency_msg = FALSE; |
561 | 258 | GST_OBJECT_UNLOCK (dec); |
562 | 258 | dec->priv->ctx.had_output_data = FALSE; |
563 | 258 | dec->priv->ctx.had_input_data = FALSE; |
564 | 258 | } |
565 | | |
566 | 811 | g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL); |
567 | 811 | g_queue_clear (&dec->priv->frames); |
568 | 811 | gst_adapter_clear (dec->priv->adapter); |
569 | 811 | gst_adapter_clear (dec->priv->adapter_out); |
570 | 811 | dec->priv->out_ts = GST_CLOCK_TIME_NONE; |
571 | 811 | dec->priv->out_dur = 0; |
572 | 811 | dec->priv->prev_ts = GST_CLOCK_TIME_NONE; |
573 | 811 | dec->priv->prev_distance = 0; |
574 | 811 | dec->priv->drained = TRUE; |
575 | 811 | dec->priv->base_ts = GST_CLOCK_TIME_NONE; |
576 | 811 | dec->priv->samples = 0; |
577 | 811 | dec->priv->discont = TRUE; |
578 | 811 | dec->priv->sync_flush = FALSE; |
579 | | |
580 | 811 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
581 | 811 | } |
582 | | |
583 | | static void |
584 | | gst_audio_decoder_finalize (GObject * object) |
585 | 86 | { |
586 | 86 | GstAudioDecoder *dec; |
587 | | |
588 | 86 | g_return_if_fail (GST_IS_AUDIO_DECODER (object)); |
589 | 86 | dec = GST_AUDIO_DECODER (object); |
590 | | |
591 | 86 | if (dec->priv->adapter) { |
592 | 86 | g_object_unref (dec->priv->adapter); |
593 | 86 | } |
594 | 86 | if (dec->priv->adapter_out) { |
595 | 86 | g_object_unref (dec->priv->adapter_out); |
596 | 86 | } |
597 | | |
598 | 86 | g_rec_mutex_clear (&dec->stream_lock); |
599 | | |
600 | 86 | G_OBJECT_CLASS (parent_class)->finalize (object); |
601 | 86 | } |
602 | | |
603 | | static GstEvent * |
604 | | gst_audio_decoder_create_merged_tags_event (GstAudioDecoder * dec) |
605 | 140 | { |
606 | 140 | GstTagList *merged_tags; |
607 | | |
608 | 140 | GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags); |
609 | 140 | GST_LOG_OBJECT (dec, "decoder : %" GST_PTR_FORMAT, dec->priv->taglist); |
610 | 140 | GST_LOG_OBJECT (dec, "mode : %d", dec->priv->decoder_tags_merge_mode); |
611 | | |
612 | 140 | merged_tags = |
613 | 140 | gst_tag_list_merge (dec->priv->upstream_tags, |
614 | 140 | dec->priv->taglist, dec->priv->decoder_tags_merge_mode); |
615 | | |
616 | 140 | GST_DEBUG_OBJECT (dec, "merged : %" GST_PTR_FORMAT, merged_tags); |
617 | | |
618 | 140 | if (merged_tags == NULL) |
619 | 0 | return NULL; |
620 | | |
621 | 140 | if (gst_tag_list_is_empty (merged_tags)) { |
622 | 0 | gst_tag_list_unref (merged_tags); |
623 | 0 | return NULL; |
624 | 0 | } |
625 | | |
626 | 140 | return gst_event_new_tag (merged_tags); |
627 | 140 | } |
628 | | |
629 | | static gboolean |
630 | | gst_audio_decoder_push_event (GstAudioDecoder * dec, GstEvent * event) |
631 | 498 | { |
632 | 498 | switch (GST_EVENT_TYPE (event)) { |
633 | 81 | case GST_EVENT_SEGMENT:{ |
634 | 81 | GstSegment seg; |
635 | | |
636 | 81 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
637 | 81 | gst_event_copy_segment (event, &seg); |
638 | | |
639 | 81 | GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg); |
640 | | |
641 | 81 | dec->output_segment = seg; |
642 | 81 | dec->priv->in_out_segment_sync = |
643 | 81 | gst_segment_is_equal (&dec->input_segment, &seg); |
644 | 81 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
645 | 81 | break; |
646 | 0 | } |
647 | 417 | default: |
648 | 417 | break; |
649 | 498 | } |
650 | | |
651 | 498 | return gst_pad_push_event (dec->srcpad, event); |
652 | 498 | } |
653 | | |
654 | | static gboolean |
655 | | gst_audio_decoder_negotiate_default (GstAudioDecoder * dec) |
656 | 85 | { |
657 | 85 | GstAudioDecoderClass *klass; |
658 | 85 | gboolean res = TRUE; |
659 | 85 | GstCaps *caps; |
660 | 85 | GstCaps *prevcaps; |
661 | 85 | GstQuery *query = NULL; |
662 | 85 | GstAllocator *allocator; |
663 | 85 | GstAllocationParams params; |
664 | | |
665 | 85 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE); |
666 | 85 | g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info), FALSE); |
667 | 85 | g_return_val_if_fail (GST_IS_CAPS (dec->priv->ctx.caps), FALSE); |
668 | | |
669 | 85 | klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
670 | | |
671 | 85 | caps = dec->priv->ctx.caps; |
672 | 85 | if (dec->priv->ctx.allocation_caps == NULL) |
673 | 68 | dec->priv->ctx.allocation_caps = gst_caps_ref (caps); |
674 | | |
675 | 85 | GST_DEBUG_OBJECT (dec, "setting src caps %" GST_PTR_FORMAT, caps); |
676 | | |
677 | 85 | if (dec->priv->pending_events) { |
678 | 85 | GList **pending_events, *l; |
679 | | |
680 | 85 | pending_events = &dec->priv->pending_events; |
681 | | |
682 | 85 | GST_DEBUG_OBJECT (dec, "Pushing pending events"); |
683 | 340 | for (l = *pending_events; l;) { |
684 | 255 | GstEvent *event = GST_EVENT (l->data); |
685 | 255 | GList *tmp; |
686 | | |
687 | 255 | if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) { |
688 | 0 | gst_audio_decoder_push_event (dec, l->data); |
689 | 0 | tmp = l; |
690 | 0 | l = l->next; |
691 | 0 | *pending_events = g_list_delete_link (*pending_events, tmp); |
692 | 255 | } else { |
693 | 255 | l = l->next; |
694 | 255 | } |
695 | 255 | } |
696 | 85 | } |
697 | | |
698 | 85 | prevcaps = gst_pad_get_current_caps (dec->srcpad); |
699 | 85 | if (!prevcaps || !gst_caps_is_equal (prevcaps, caps)) |
700 | 68 | res = gst_pad_set_caps (dec->srcpad, caps); |
701 | 85 | if (prevcaps) |
702 | 17 | gst_caps_unref (prevcaps); |
703 | | |
704 | 85 | if (!res) |
705 | 0 | goto done; |
706 | 85 | dec->priv->ctx.output_format_changed = FALSE; |
707 | | |
708 | 85 | query = gst_query_new_allocation (dec->priv->ctx.allocation_caps, TRUE); |
709 | 85 | if (!gst_pad_peer_query (dec->srcpad, query)) { |
710 | 85 | GST_DEBUG_OBJECT (dec, "didn't get downstream ALLOCATION hints"); |
711 | 85 | } |
712 | | |
713 | 85 | g_assert (klass->decide_allocation != NULL); |
714 | 85 | res = klass->decide_allocation (dec, query); |
715 | | |
716 | 85 | GST_DEBUG_OBJECT (dec, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, res, |
717 | 85 | query); |
718 | | |
719 | 85 | if (!res) |
720 | 0 | goto no_decide_allocation; |
721 | | |
722 | | /* we got configuration from our peer or the decide_allocation method, |
723 | | * parse them */ |
724 | 85 | if (gst_query_get_n_allocation_params (query) > 0) { |
725 | 85 | gst_query_parse_nth_allocation_param (query, 0, &allocator, ¶ms); |
726 | 85 | } else { |
727 | 0 | allocator = NULL; |
728 | 0 | gst_allocation_params_init (¶ms); |
729 | 0 | } |
730 | | |
731 | 85 | if (dec->priv->ctx.allocator) |
732 | 0 | gst_object_unref (dec->priv->ctx.allocator); |
733 | 85 | dec->priv->ctx.allocator = allocator; |
734 | 85 | dec->priv->ctx.params = params; |
735 | | |
736 | 85 | done: |
737 | | |
738 | 85 | if (query) |
739 | 85 | gst_query_unref (query); |
740 | | |
741 | 85 | return res; |
742 | | |
743 | | /* ERRORS */ |
744 | 0 | no_decide_allocation: |
745 | 0 | { |
746 | 0 | GST_WARNING_OBJECT (dec, "Subclass failed to decide allocation"); |
747 | 0 | goto done; |
748 | 85 | } |
749 | 85 | } |
750 | | |
751 | | static gboolean |
752 | | gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec) |
753 | 85 | { |
754 | 85 | GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
755 | 85 | gboolean ret = TRUE; |
756 | | |
757 | 85 | if (G_LIKELY (klass->negotiate)) |
758 | 85 | ret = klass->negotiate (dec); |
759 | | |
760 | 85 | return ret; |
761 | 85 | } |
762 | | |
763 | | /** |
764 | | * gst_audio_decoder_negotiate: |
765 | | * @dec: a #GstAudioDecoder |
766 | | * |
767 | | * Negotiates with downstream elements the currently configured #GstAudioInfo. |
768 | | * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case, but marks it again if |
769 | | * negotiation fails. |
770 | | * |
771 | | * Returns: %TRUE if the negotiation succeeded, else %FALSE. |
772 | | */ |
773 | | gboolean |
774 | | gst_audio_decoder_negotiate (GstAudioDecoder * dec) |
775 | 0 | { |
776 | 0 | GstAudioDecoderClass *klass; |
777 | 0 | gboolean res = TRUE; |
778 | 0 |
779 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE); |
780 | | |
781 | 0 | klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
782 | 0 |
783 | 0 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
784 | 0 | gst_pad_check_reconfigure (dec->srcpad); |
785 | 0 | if (klass->negotiate) { |
786 | 0 | res = klass->negotiate (dec); |
787 | 0 | if (!res) |
788 | 0 | gst_pad_mark_reconfigure (dec->srcpad); |
789 | 0 | } |
790 | 0 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
791 | 0 |
792 | 0 | return res; |
793 | 0 | } |
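
A subclass would typically only call gst_audio_decoder_negotiate() itself when it needs the new caps negotiated immediately, e.g. right after changing the output format. A hedged sketch; 'info' is assumed to have been filled in by the subclass:

/* Hypothetical subclass snippet: renegotiate immediately after an output
 * format change instead of waiting for the next finish_frame(). */
if (!gst_audio_decoder_set_output_format (dec, &info))
  return GST_FLOW_NOT_NEGOTIATED;
if (!gst_audio_decoder_negotiate (dec))
  return GST_FLOW_NOT_NEGOTIATED;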
794 | | |
795 | | /** |
796 | | * gst_audio_decoder_set_output_format: |
797 | | * @dec: a #GstAudioDecoder |
798 | | * @info: #GstAudioInfo |
799 | | * |
800 | | * Configure output info on the srcpad of @dec. |
801 | | * |
802 | | * Returns: %TRUE on success. |
803 | | **/ |
804 | | gboolean |
805 | | gst_audio_decoder_set_output_format (GstAudioDecoder * dec, |
806 | | const GstAudioInfo * info) |
807 | 73 | { |
808 | 73 | gboolean res = TRUE; |
809 | 73 | GstCaps *caps = NULL; |
810 | | |
811 | 73 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE); |
812 | 73 | g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (info), FALSE); |
813 | | |
814 | | /* If the audio info can't be converted to caps, |
815 | | * it was invalid */ |
816 | 73 | caps = gst_audio_info_to_caps (info); |
817 | 73 | if (!caps) { |
818 | 0 | GST_WARNING_OBJECT (dec, "invalid output format"); |
819 | 0 | return FALSE; |
820 | 0 | } |
821 | | |
822 | 73 | res = gst_audio_decoder_set_output_caps (dec, caps); |
823 | 73 | gst_caps_unref (caps); |
824 | | |
825 | 73 | return res; |
826 | 73 | } |
827 | | |
828 | | /** |
829 | | * gst_audio_decoder_set_output_caps: |
830 | | * @dec: a #GstAudioDecoder |
831 | | * @caps: (transfer none): (fixed) #GstCaps |
832 | | * |
833 | | * Configure output caps on the srcpad of @dec. Similar to |
834 | | * gst_audio_decoder_set_output_format(), but allows subclasses to specify |
835 | | * output caps that can't be expressed via #GstAudioInfo, e.g. caps that have |
836 | | * caps features. |
837 | | * |
838 | | * Returns: %TRUE on success. |
839 | | * |
840 | | * Since: 1.16 |
841 | | **/ |
842 | | gboolean |
843 | | gst_audio_decoder_set_output_caps (GstAudioDecoder * dec, GstCaps * caps) |
844 | 73 | { |
845 | 73 | gboolean res = TRUE; |
846 | 73 | guint old_rate; |
847 | 73 | GstCaps *templ_caps; |
848 | 73 | GstAudioInfo info; |
849 | | |
850 | 73 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE); |
851 | | |
852 | 73 | GST_DEBUG_OBJECT (dec, "Setting srcpad caps %" GST_PTR_FORMAT, caps); |
853 | | |
854 | 73 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
855 | | |
856 | 73 | if (!gst_caps_is_fixed (caps)) |
857 | 0 | goto refuse_caps; |
858 | | |
859 | | /* check if caps can be parsed */ |
860 | 73 | if (!gst_audio_info_from_caps (&info, caps)) |
861 | 0 | goto refuse_caps; |
862 | | |
863 | | /* Only allow caps that are a subset of the template caps */ |
864 | 73 | templ_caps = gst_pad_get_pad_template_caps (dec->srcpad); |
865 | 73 | if (!gst_caps_is_subset (caps, templ_caps)) { |
866 | 0 | GST_WARNING_OBJECT (dec, "Requested output format %" GST_PTR_FORMAT |
867 | 0 | " do not match template %" GST_PTR_FORMAT, caps, templ_caps); |
868 | 0 | gst_caps_unref (templ_caps); |
869 | 0 | goto refuse_caps; |
870 | 0 | } |
871 | 73 | gst_caps_unref (templ_caps); |
872 | | |
873 | | /* adjust ts tracking to new sample rate */ |
874 | 73 | old_rate = GST_AUDIO_INFO_RATE (&dec->priv->ctx.info); |
875 | 73 | if (GST_CLOCK_TIME_IS_VALID (dec->priv->base_ts) && old_rate) { |
876 | 0 | dec->priv->base_ts += |
877 | 0 | GST_FRAMES_TO_CLOCK_TIME (dec->priv->samples, old_rate); |
878 | 0 | dec->priv->samples = 0; |
879 | 0 | } |
880 | | |
881 | | /* copy the GstAudioInfo */ |
882 | 73 | GST_OBJECT_LOCK (dec); |
883 | 73 | dec->priv->ctx.info = info; |
884 | 73 | GST_OBJECT_UNLOCK (dec); |
885 | | |
886 | 73 | gst_caps_replace (&dec->priv->ctx.caps, caps); |
887 | 73 | dec->priv->ctx.output_format_changed = TRUE; |
888 | | |
889 | 73 | done: |
890 | 73 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
891 | | |
892 | 73 | return res; |
893 | | |
894 | | /* ERRORS */ |
895 | 0 | refuse_caps: |
896 | 0 | { |
897 | 0 | GST_WARNING_OBJECT (dec, "invalid output format"); |
898 | 0 | res = FALSE; |
899 | 0 | goto done; |
900 | 73 | } |
901 | 73 | } |
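
As an illustration of the caps-features case gst_audio_decoder_set_output_caps() is meant for, a hedged sketch follows; the choice of DMABuf memory is only an assumption for the example, and 'info', 'dec' and 'res' are assumed from the surrounding subclass code (gst/allocators/gstdmabuf.h provides the feature name):

/* Hypothetical: advertise raw audio backed by DMABuf memory, which cannot be
 * expressed through GstAudioInfo alone. */
GstCaps *out_caps = gst_audio_info_to_caps (&info);

gst_caps_set_features (out_caps, 0,
    gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_DMABUF, NULL));
res = gst_audio_decoder_set_output_caps (dec, out_caps);
gst_caps_unref (out_caps);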
902 | | |
903 | | static gboolean |
904 | | gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec, GstCaps * caps) |
905 | 167 | { |
906 | 167 | GstAudioDecoderClass *klass; |
907 | 167 | gboolean res = TRUE; |
908 | | |
909 | 167 | klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
910 | | |
911 | 167 | GST_DEBUG_OBJECT (dec, "caps: %" GST_PTR_FORMAT, caps); |
912 | | |
913 | 167 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
914 | | |
915 | 167 | if (dec->priv->ctx.input_caps |
916 | 83 | && gst_caps_is_equal (dec->priv->ctx.input_caps, caps)) { |
917 | 83 | GST_DEBUG_OBJECT (dec, "Caps did not change, not setting again"); |
918 | 83 | goto done; |
919 | 83 | } |
920 | | |
921 | | /* NOTE pbutils only needed here */ |
922 | | /* TODO maybe (only) upstream demuxer/parser etc should handle this ? */ |
923 | | #if 0 |
924 | | if (!dec->priv->taglist) |
925 | | dec->priv->taglist = gst_tag_list_new (); |
926 | | dec->priv->taglist = gst_tag_list_make_writable (dec->priv->taglist); |
927 | | gst_pb_utils_add_codec_description_to_tag_list (dec->priv->taglist, |
928 | | GST_TAG_AUDIO_CODEC, caps); |
929 | | dec->priv->taglist_changed = TRUE; |
930 | | #endif |
931 | | |
932 | 84 | if (klass->set_format) |
933 | 84 | res = klass->set_format (dec, caps); |
934 | | |
935 | 84 | if (res) |
936 | 84 | gst_caps_replace (&dec->priv->ctx.input_caps, caps); |
937 | | |
938 | 167 | done: |
939 | 167 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
940 | | |
941 | 167 | return res; |
942 | 84 | } |
943 | | |
944 | | static void |
945 | | gst_audio_decoder_setup (GstAudioDecoder * dec) |
946 | 83 | { |
947 | 83 | GstQuery *query; |
948 | 83 | gboolean res; |
949 | | |
950 | | /* check if in live pipeline, then latency messing is no-no */ |
951 | 83 | query = gst_query_new_latency (); |
952 | 83 | res = gst_pad_peer_query (dec->sinkpad, query); |
953 | 83 | if (res) { |
954 | 82 | gst_query_parse_latency (query, &res, NULL, NULL); |
955 | 82 | res = !res; |
956 | 82 | } |
957 | 83 | gst_query_unref (query); |
958 | | |
959 | | /* normalize to bool */ |
960 | 83 | dec->priv->agg = !!res; |
961 | 83 | } |
962 | | |
963 | | static GstFlowReturn |
964 | | gst_audio_decoder_push_forward (GstAudioDecoder * dec, GstBuffer * buf) |
965 | 68 | { |
966 | 68 | GstAudioDecoderClass *klass; |
967 | 68 | GstAudioDecoderPrivate *priv; |
968 | 68 | GstAudioDecoderContext *ctx; |
969 | 68 | GstFlowReturn ret = GST_FLOW_OK; |
970 | 68 | GstClockTime ts; |
971 | | |
972 | 68 | klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
973 | 68 | priv = dec->priv; |
974 | 68 | ctx = &dec->priv->ctx; |
975 | | |
976 | 68 | g_return_val_if_fail (ctx->info.bpf != 0, GST_FLOW_ERROR); |
977 | | |
978 | 68 | if (G_UNLIKELY (!buf)) { |
979 | 0 | g_assert_not_reached (); |
980 | 0 | return GST_FLOW_OK; |
981 | 0 | } |
982 | | |
983 | 68 | ctx->had_output_data = TRUE; |
984 | 68 | ts = GST_BUFFER_PTS (buf); |
985 | | |
986 | 68 | GST_LOG_OBJECT (dec, |
987 | 68 | "clipping buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT |
988 | 68 | ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf), |
989 | 68 | GST_TIME_ARGS (GST_BUFFER_PTS (buf)), |
990 | 68 | GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); |
991 | | |
992 | | /* clip buffer */ |
993 | 68 | buf = gst_audio_buffer_clip (buf, &dec->output_segment, ctx->info.rate, |
994 | 68 | ctx->info.bpf); |
995 | 68 | if (G_UNLIKELY (!buf)) { |
996 | 0 | GST_DEBUG_OBJECT (dec, "no data after clipping to segment"); |
997 | | /* only check and return EOS if upstream still |
998 | | * in the same segment and interested as such */ |
999 | 0 | if (dec->priv->in_out_segment_sync) { |
1000 | 0 | if (dec->output_segment.rate >= 0) { |
1001 | 0 | if (ts >= dec->output_segment.stop) |
1002 | 0 | ret = GST_FLOW_EOS; |
1003 | 0 | } else if (ts < dec->output_segment.start) { |
1004 | 0 | ret = GST_FLOW_EOS; |
1005 | 0 | } |
1006 | 0 | } |
1007 | 0 | goto exit; |
1008 | 0 | } |
1009 | | |
1010 | | /* decorate */ |
1011 | 68 | if (G_UNLIKELY (priv->discont)) { |
1012 | 68 | GST_LOG_OBJECT (dec, "marking discont"); |
1013 | 68 | GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT); |
1014 | 68 | priv->discont = FALSE; |
1015 | 68 | } |
1016 | | |
1017 | | /* track where we are */ |
1018 | 68 | if (G_LIKELY (GST_BUFFER_PTS_IS_VALID (buf))) { |
1019 | | /* duration should always be valid for raw audio */ |
1020 | 68 | g_assert (GST_BUFFER_DURATION_IS_VALID (buf)); |
1021 | 68 | dec->output_segment.position = |
1022 | 68 | GST_BUFFER_PTS (buf) + GST_BUFFER_DURATION (buf); |
1023 | 68 | } |
1024 | | |
1025 | 68 | if (klass->pre_push) { |
1026 | | /* last chance for subclass to do some dirty stuff */ |
1027 | 0 | ret = klass->pre_push (dec, &buf); |
1028 | 0 | if (ret != GST_FLOW_OK || !buf) { |
1029 | 0 | GST_DEBUG_OBJECT (dec, "subclass returned %s, buf %p", |
1030 | 0 | gst_flow_get_name (ret), buf); |
1031 | 0 | if (buf) |
1032 | 0 | gst_buffer_unref (buf); |
1033 | 0 | goto exit; |
1034 | 0 | } |
1035 | 0 | } |
1036 | | |
1037 | 68 | GST_LOG_OBJECT (dec, |
1038 | 68 | "pushing buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT |
1039 | 68 | ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf), |
1040 | 68 | GST_TIME_ARGS (GST_BUFFER_PTS (buf)), |
1041 | 68 | GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); |
1042 | | |
1043 | 68 | ret = gst_pad_push (dec->srcpad, buf); |
1044 | | |
1045 | 68 | exit: |
1046 | 68 | return ret; |
1047 | 68 | } |
1048 | | |
1049 | | /* mini aggregator combining output buffers into fewer larger ones, |
1050 | | * if so allowed/configured */ |
1051 | | static GstFlowReturn |
1052 | | gst_audio_decoder_output (GstAudioDecoder * dec, GstBuffer * buf) |
1053 | 391 | { |
1054 | 391 | GstAudioDecoderPrivate *priv; |
1055 | 391 | GstFlowReturn ret = GST_FLOW_OK; |
1056 | 391 | GstBuffer *inbuf = NULL; |
1057 | | |
1058 | 391 | priv = dec->priv; |
1059 | | |
1060 | 391 | if (G_UNLIKELY (priv->agg < 0)) |
1061 | 83 | gst_audio_decoder_setup (dec); |
1062 | | |
1063 | 391 | if (G_LIKELY (buf)) { |
1064 | 68 | GST_LOG_OBJECT (dec, |
1065 | 68 | "output buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT |
1066 | 68 | ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf), |
1067 | 68 | GST_TIME_ARGS (GST_BUFFER_PTS (buf)), |
1068 | 68 | GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); |
1069 | 68 | } |
1070 | | |
1071 | 391 | again: |
1072 | 391 | inbuf = NULL; |
1073 | 391 | if (priv->agg && dec->priv->latency > 0 && |
1074 | 0 | priv->ctx.info.layout == GST_AUDIO_LAYOUT_INTERLEAVED) { |
1075 | 0 | gint av; |
1076 | 0 | gboolean assemble = FALSE; |
1077 | 0 | const GstClockTimeDiff tol = 10 * GST_MSECOND; |
1078 | 0 | GstClockTimeDiff diff = -100 * GST_MSECOND; |
1079 | 0 |
1080 | 0 | av = gst_adapter_available (priv->adapter_out); |
1081 | 0 | if (G_UNLIKELY (!buf)) { |
1082 | | /* forcibly send current */ |
1083 | 0 | assemble = TRUE; |
1084 | 0 | GST_LOG_OBJECT (dec, "forcing fragment flush"); |
1085 | 0 | } else if (av && (!GST_BUFFER_PTS_IS_VALID (buf) || |
1086 | 0 | !GST_CLOCK_TIME_IS_VALID (priv->out_ts) || |
1087 | 0 | ((diff = GST_CLOCK_DIFF (GST_BUFFER_PTS (buf), |
1088 | 0 | priv->out_ts + priv->out_dur)) > tol) || diff < -tol)) { |
1089 | 0 | assemble = TRUE; |
1090 | 0 | GST_LOG_OBJECT (dec, "buffer %d ms apart from current fragment", |
1091 | 0 | (gint) (diff / GST_MSECOND)); |
1092 | 0 | } else { |
1093 | | /* add or start collecting */ |
1094 | 0 | if (!av) { |
1095 | 0 | GST_LOG_OBJECT (dec, "starting new fragment"); |
1096 | 0 | priv->out_ts = GST_BUFFER_PTS (buf); |
1097 | 0 | } else { |
1098 | 0 | GST_LOG_OBJECT (dec, "adding to fragment"); |
1099 | 0 | } |
1100 | 0 | gst_adapter_push (priv->adapter_out, buf); |
1101 | 0 | priv->out_dur += GST_BUFFER_DURATION (buf); |
1102 | 0 | av += gst_buffer_get_size (buf); |
1103 | 0 | buf = NULL; |
1104 | 0 | } |
1105 | 0 | if (priv->out_dur > dec->priv->latency) |
1106 | 0 | assemble = TRUE; |
1107 | 0 | if (av && assemble) { |
1108 | 0 | GST_LOG_OBJECT (dec, "assembling fragment"); |
1109 | 0 | inbuf = buf; |
1110 | 0 | buf = gst_adapter_take_buffer (priv->adapter_out, av); |
1111 | 0 | GST_BUFFER_PTS (buf) = priv->out_ts; |
1112 | 0 | GST_BUFFER_DURATION (buf) = priv->out_dur; |
1113 | 0 | priv->out_ts = GST_CLOCK_TIME_NONE; |
1114 | 0 | priv->out_dur = 0; |
1115 | 0 | } |
1116 | 0 | } |
1117 | | |
1118 | 391 | if (G_LIKELY (buf)) { |
1119 | 68 | if (dec->output_segment.rate > 0.0) { |
1120 | 68 | ret = gst_audio_decoder_push_forward (dec, buf); |
1121 | 68 | GST_LOG_OBJECT (dec, "buffer pushed: %s", gst_flow_get_name (ret)); |
1122 | 68 | } else { |
1123 | 0 | ret = GST_FLOW_OK; |
1124 | 0 | priv->queued = g_list_prepend (priv->queued, buf); |
1125 | 0 | GST_LOG_OBJECT (dec, "buffer queued"); |
1126 | 0 | } |
1127 | | |
1128 | 68 | if (inbuf) { |
1129 | 0 | buf = inbuf; |
1130 | 0 | goto again; |
1131 | 0 | } |
1132 | 68 | } |
1133 | | |
1134 | 391 | return ret; |
1135 | 391 | } |
1136 | | |
1137 | | static void |
1138 | | send_pending_events (GstAudioDecoder * dec) |
1139 | 81 | { |
1140 | 81 | GstAudioDecoderPrivate *priv = dec->priv; |
1141 | 81 | GList *pending_events, *l; |
1142 | | |
1143 | 81 | pending_events = priv->pending_events; |
1144 | 81 | priv->pending_events = NULL; |
1145 | | |
1146 | 81 | GST_DEBUG_OBJECT (dec, "Pushing pending events"); |
1147 | 314 | for (l = pending_events; l; l = l->next) |
1148 | 233 | gst_audio_decoder_push_event (dec, l->data); |
1149 | 81 | g_list_free (pending_events); |
1150 | 81 | } |
1151 | | |
1152 | | /* Iterate the list of pending events, and ensure |
1153 | | * the current output segment is up to date for |
1154 | | * decoding */ |
1155 | | static void |
1156 | | apply_pending_events (GstAudioDecoder * dec) |
1157 | 323 | { |
1158 | 323 | GstAudioDecoderPrivate *priv = dec->priv; |
1159 | 323 | GList *l; |
1160 | | |
1161 | 323 | GST_DEBUG_OBJECT (dec, "Applying pending segments"); |
1162 | 1.23k | for (l = priv->pending_events; l; l = l->next) { |
1163 | 907 | GstEvent *event = GST_EVENT (l->data); |
1164 | 907 | switch (GST_EVENT_TYPE (event)) { |
1165 | 306 | case GST_EVENT_SEGMENT:{ |
1166 | 306 | GstSegment seg; |
1167 | | |
1168 | 306 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
1169 | 306 | gst_event_copy_segment (event, &seg); |
1170 | | |
1171 | 306 | GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg); |
1172 | | |
1173 | 306 | dec->output_segment = seg; |
1174 | 306 | dec->priv->in_out_segment_sync = |
1175 | 306 | gst_segment_is_equal (&dec->input_segment, &seg); |
1176 | 306 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
1177 | 306 | break; |
1178 | 0 | } |
1179 | 601 | default: |
1180 | 601 | break; |
1181 | 907 | } |
1182 | 907 | } |
1183 | 323 | } |
1184 | | |
1185 | | static GstFlowReturn |
1186 | | check_pending_reconfigure (GstAudioDecoder * dec) |
1187 | 68 | { |
1188 | 68 | GstFlowReturn ret = GST_FLOW_OK; |
1189 | 68 | GstAudioDecoderContext *ctx; |
1190 | 68 | gboolean needs_reconfigure; |
1191 | | |
1192 | 68 | ctx = &dec->priv->ctx; |
1193 | | |
1194 | 68 | needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad); |
1195 | 68 | if (G_UNLIKELY (ctx->output_format_changed || |
1196 | 68 | (GST_AUDIO_INFO_IS_VALID (&ctx->info) |
1197 | 68 | && needs_reconfigure))) { |
1198 | 17 | if (!gst_audio_decoder_negotiate_unlocked (dec)) { |
1199 | 0 | gst_pad_mark_reconfigure (dec->srcpad); |
1200 | 0 | if (GST_PAD_IS_FLUSHING (dec->srcpad)) |
1201 | 0 | ret = GST_FLOW_FLUSHING; |
1202 | 0 | else |
1203 | 0 | ret = GST_FLOW_NOT_NEGOTIATED; |
1204 | 0 | } |
1205 | 17 | } |
1206 | 68 | return ret; |
1207 | 68 | } |
1208 | | |
1209 | | static gboolean |
1210 | | gst_audio_decoder_transform_meta_default (GstAudioDecoder * |
1211 | | decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf) |
1212 | 0 | { |
1213 | 0 | const GstMetaInfo *info = meta->info; |
1214 | 0 | const gchar *const *tags; |
1215 | 0 | const gchar *const supported_tags[] = { |
1216 | 0 | GST_META_TAG_AUDIO_STR, |
1217 | 0 | GST_META_TAG_AUDIO_CHANNELS_STR, |
1218 | 0 | NULL, |
1219 | 0 | }; |
1220 | 0 |
1221 | 0 | tags = gst_meta_api_type_get_tags (info->api); |
1222 | 0 |
1223 | 0 | if (!tags) |
1224 | 0 | return TRUE; |
1225 | | |
1226 | 0 | while (*tags) { |
1227 | 0 | if (!g_strv_contains (supported_tags, *tags)) |
1228 | 0 | return FALSE; |
1229 | 0 | tags++; |
1230 | 0 | } |
1231 | | |
1232 | 0 | return TRUE; |
1233 | 0 | } |
1234 | | |
1235 | | typedef struct |
1236 | | { |
1237 | | GstAudioDecoder *decoder; |
1238 | | GstBuffer *outbuf; |
1239 | | } CopyMetaData; |
1240 | | |
1241 | | static gboolean |
1242 | | foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data) |
1243 | 0 | { |
1244 | 0 | CopyMetaData *data = user_data; |
1245 | 0 | GstAudioDecoder *decoder = data->decoder; |
1246 | 0 | GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (decoder); |
1247 | 0 | GstBuffer *outbuf = data->outbuf; |
1248 | 0 | const GstMetaInfo *info = (*meta)->info; |
1249 | 0 | gboolean do_copy = FALSE; |
1250 | 0 |
1251 | 0 | if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory) |
1252 | 0 | || gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory_reference)) { |
1253 | | /* never call the transform_meta with memory specific metadata */ |
1254 | 0 | GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s", |
1255 | 0 | g_type_name (info->api)); |
1256 | 0 | do_copy = FALSE; |
1257 | 0 | } else if (klass->transform_meta) { |
1258 | 0 | do_copy = klass->transform_meta (decoder, outbuf, *meta, inbuf); |
1259 | 0 | GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d", |
1260 | 0 | g_type_name (info->api), do_copy); |
1261 | 0 | } |
1262 | | |
1263 | | /* we only copy metadata when the subclass implemented a transform_meta |
1264 | | * function and when it returns %TRUE */ |
1265 | 0 | if (do_copy && info->transform_func) { |
1266 | 0 | GstMetaTransformCopy copy_data = { FALSE, 0, -1 }; |
1267 | 0 | GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api)); |
1268 | | /* simply copy then */ |
1269 | 0 | info->transform_func (outbuf, *meta, inbuf, |
1270 | 0 | _gst_meta_transform_copy, ©_data); |
1271 | 0 | } |
1272 | 0 | return TRUE; |
1273 | 0 | } |
1274 | | |
1275 | | /** |
1276 | | * gst_audio_decoder_finish_subframe: |
1277 | | * @dec: a #GstAudioDecoder |
1278 | | * @buf: (transfer full) (nullable): decoded data |
1279 | | * |
1280 | | * Collects decoded data and pushes it downstream. This function may be called |
1281 | | * multiple times for a given input frame. |
1282 | | * |
1283 | | * @buf may be NULL in which case it is assumed that the current input frame is |
1284 | | * finished. This is equivalent to calling gst_audio_decoder_finish_frame() |
1285 | | * with a NULL buffer and frames=1 after having pushed out all decoded audio |
1286 | | * subframes using this function. |
1287 | | * |
1288 | | * When called with valid data in @buf the source pad caps must have been set |
1289 | | * already. |
1290 | | * |
1291 | | * Note that a frame received in #GstAudioDecoderClass.handle_frame() may be |
1292 | | * invalidated by a call to this function. |
1293 | | * |
1294 | | * Returns: a #GstFlowReturn that should be escalated to caller (of caller) |
1295 | | * |
1296 | | * Since: 1.16 |
1297 | | */ |
1298 | | GstFlowReturn |
1299 | | gst_audio_decoder_finish_subframe (GstAudioDecoder * dec, GstBuffer * buf) |
1300 | 0 | { |
1301 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), GST_FLOW_ERROR); |
1302 | | |
1303 | 0 | if (buf == NULL) |
1304 | 0 | return gst_audio_decoder_finish_frame_or_subframe (dec, NULL, 1); |
1305 | 0 | else |
1306 | 0 | return gst_audio_decoder_finish_frame_or_subframe (dec, buf, 0); |
1307 | 0 | } |
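
The intended calling pattern for the subframe API, sketched with a hypothetical incremental decoder; my_dec_decode_next_chunk() is not a real function, just a stand-in for a codec that produces several output chunks per input frame:

/* Hypothetical handle_frame() that emits several subframes per input frame. */
static GstFlowReturn
my_dec_handle_frame_subframes (GstAudioDecoder * dec, GstBuffer * inbuf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *chunk;

  if (inbuf == NULL)
    return gst_audio_decoder_finish_frame (dec, NULL, 1);

  /* push each decoded chunk downstream as soon as it is available */
  while (ret == GST_FLOW_OK && (chunk = my_dec_decode_next_chunk (dec, inbuf)))
    ret = gst_audio_decoder_finish_subframe (dec, chunk);

  /* a NULL subframe closes the current input frame */
  if (ret == GST_FLOW_OK)
    ret = gst_audio_decoder_finish_subframe (dec, NULL);

  return ret;
}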
1308 | | |
1309 | | /** |
1310 | | * gst_audio_decoder_finish_frame: |
1311 | | * @dec: a #GstAudioDecoder |
1312 | | * @buf: (transfer full) (nullable): decoded data |
1313 | | * @frames: number of decoded frames represented by decoded data |
1314 | | * |
1315 | | * Collects decoded data and pushes it downstream. |
1316 | | * |
1317 | | * @buf may be NULL in which case the indicated number of frames |
1318 | | * are discarded and considered to have produced no output |
1319 | | * (e.g. lead-in or setup frames). |
1320 | | * Otherwise, source pad caps must be set when it is called with valid |
1321 | | * data in @buf. |
1322 | | * |
1323 | | * Note that a frame received in #GstAudioDecoderClass.handle_frame() may be |
1324 | | * invalidated by a call to this function. |
1325 | | * |
1326 | | * Returns: a #GstFlowReturn that should be escalated to caller (of caller) |
1327 | | */ |
1328 | | GstFlowReturn |
1329 | | gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf, |
1330 | | gint frames) |
1331 | 375 | { |
1332 | 375 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), GST_FLOW_ERROR); |
1333 | | |
1334 | | /* no dummy calls please */ |
1335 | 375 | g_return_val_if_fail (frames != 0, GST_FLOW_ERROR); |
1336 | | |
1337 | 375 | return gst_audio_decoder_finish_frame_or_subframe (dec, buf, frames); |
1338 | 375 | } |
1339 | | |
1340 | | /* frames == 0 indicates that this is a sub-frame and further sub-frames may |
1341 | | * follow for the current input frame. */ |
1342 | | static GstFlowReturn |
1343 | | gst_audio_decoder_finish_frame_or_subframe (GstAudioDecoder * dec, |
1344 | | GstBuffer * buf, gint frames) |
1345 | 375 | { |
1346 | 375 | GstAudioDecoderPrivate *priv; |
1347 | 375 | GstAudioDecoderContext *ctx; |
1348 | 375 | GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
1349 | 375 | GstAudioMeta *meta; |
1350 | 375 | GstClockTime ts, next_ts; |
1351 | 375 | gsize size, samples = 0; |
1352 | 375 | GstFlowReturn ret = GST_FLOW_OK; |
1353 | 375 | GQueue inbufs = G_QUEUE_INIT; |
1354 | 375 | gboolean is_subframe = (frames == 0); |
1355 | 375 | gboolean do_check_resync; |
1356 | | |
1357 | | /* subclass should not hand us no data */ |
1358 | 375 | g_return_val_if_fail (buf == NULL || gst_buffer_get_size (buf) > 0, |
1359 | 375 | GST_FLOW_ERROR); |
1360 | | |
1361 | | /* if it's a subframe (frames == 0) we must have a valid buffer */ |
1362 | 375 | g_assert (!is_subframe || buf != NULL); |
1363 | | |
1364 | 375 | priv = dec->priv; |
1365 | 375 | ctx = &dec->priv->ctx; |
1366 | 375 | meta = buf ? gst_buffer_get_audio_meta (buf) : NULL; |
1367 | 375 | size = buf ? gst_buffer_get_size (buf) : 0; |
1368 | 375 | samples = buf ? (meta ? meta->samples : size / ctx->info.bpf) : 0; |
1369 | | |
1370 | | /* must know the output format by now */ |
1371 | 375 | g_return_val_if_fail (buf == NULL || GST_AUDIO_INFO_IS_VALID (&ctx->info), |
1372 | 375 | GST_FLOW_ERROR); |
1373 | | |
1374 | 375 | GST_LOG_OBJECT (dec, |
1375 | 375 | "accepting %" G_GSIZE_FORMAT " bytes == %" G_GSIZE_FORMAT |
1376 | 375 | " samples for %d frames", buf ? size : 0, samples, frames); |
1377 | | |
1378 | 375 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
1379 | | |
1380 | 375 | if (buf != NULL && priv->subframe_samples == 0) { |
1381 | 68 | ret = check_pending_reconfigure (dec); |
1382 | 68 | if (ret == GST_FLOW_FLUSHING || ret == GST_FLOW_NOT_NEGOTIATED) { |
1383 | 0 | gst_buffer_unref (buf); |
1384 | 0 | goto exit; |
1385 | 0 | } |
1386 | | |
1387 | 68 | if (priv->pending_events) |
1388 | 68 | send_pending_events (dec); |
1389 | 68 | } |
1390 | | |
1391 | | /* sanity checking */ |
1392 | 375 | if (G_LIKELY (buf && ctx->info.bpf)) { |
1393 | 68 | if (!meta || meta->info.layout == GST_AUDIO_LAYOUT_INTERLEAVED) { |
1394 | | /* output should be whole number of sample frames */ |
1395 | 68 | if (size % ctx->info.bpf) |
1396 | 0 | goto wrong_buffer; |
1397 | | /* output should have no additional padding */ |
1398 | 68 | if (samples != size / ctx->info.bpf) |
1399 | 0 | goto wrong_samples; |
1400 | 68 | } else { |
1401 | | /* can't have more samples than what the buffer fits */ |
1402 | 0 | if (samples > size / ctx->info.bpf) |
1403 | 0 | goto wrong_samples; |
1404 | 0 | } |
1405 | 68 | } |
1406 | | |
1407 | | /* frame and ts book-keeping */ |
1408 | 375 | if (G_UNLIKELY (frames < 0)) { |
1409 | 0 | if (G_UNLIKELY (-frames - 1 > priv->frames.length)) { |
1410 | 0 | GST_ELEMENT_WARNING (dec, STREAM, DECODE, |
1411 | 0 | ("received more decoded frames %d than provided %d", frames, |
1412 | 0 | priv->frames.length), (NULL)); |
1413 | 0 | frames = 0; |
1414 | 0 | } else { |
1415 | 0 | frames = priv->frames.length + frames + 1; |
1416 | 0 | } |
1417 | 375 | } else if (G_UNLIKELY (frames > priv->frames.length)) { |
1418 | 0 | if (G_LIKELY (!priv->force)) { |
1419 | 0 | GST_ELEMENT_WARNING (dec, STREAM, DECODE, |
1420 | 0 | ("received more decoded frames %d than provided %d", frames, |
1421 | 0 | priv->frames.length), (NULL)); |
1422 | 0 | } |
1423 | 0 | frames = priv->frames.length; |
1424 | 0 | } |
1425 | | |
1426 | 375 | if (G_LIKELY (buf)) |
1427 | 68 | buf = gst_buffer_make_writable (buf); |
1428 | | |
1429 | 375 | if (G_LIKELY (priv->frames.length)) { |
1430 | 375 | ts = GST_BUFFER_PTS (priv->frames.head->data); |
1431 | 375 | if (G_LIKELY (buf)) { |
1432 | | /* propagate RESYNC flag to output buffer */ |
1433 | 68 | if (GST_BUFFER_FLAG_IS_SET (priv->frames.head->data, |
1434 | 68 | GST_BUFFER_FLAG_RESYNC)) |
1435 | 68 | GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_RESYNC); |
1436 | 68 | if (is_subframe) { |
1437 | 0 | priv->frames.head->data = |
1438 | 0 | gst_buffer_make_writable (priv->frames.head->data); |
1439 | 0 | GST_BUFFER_FLAG_UNSET (priv->frames.head->data, GST_BUFFER_FLAG_RESYNC); |
1440 | 0 | } |
1441 | 68 | } |
1442 | 375 | } else |
1443 | 0 | ts = GST_CLOCK_TIME_NONE; |
1444 | | |
1445 | 375 | GST_DEBUG_OBJECT (dec, "leading frame ts %" GST_TIME_FORMAT, |
1446 | 375 | GST_TIME_ARGS (ts)); |
1447 | | |
1448 | 375 | if (is_subframe && priv->frames.length == 0) |
1449 | 0 | goto subframe_without_pending_input_frame; |
1450 | | |
1451 | | /* this will be skipped in the is_subframe case because frames will be 0 */ |
1452 | 750 | while (priv->frames.length && frames) { |
1453 | 375 | g_queue_push_tail (&inbufs, g_queue_pop_head (&priv->frames)); |
1454 | 375 | dec->priv->ctx.delay = dec->priv->frames.length; |
1455 | 375 | frames--; |
1456 | 375 | } |
1457 | | |
1458 | 375 | if (G_UNLIKELY (!buf)) |
1459 | 307 | goto exit; |
1460 | | |
1461 | | /* lock on */ |
1462 | 68 | if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts))) { |
1463 | 68 | priv->base_ts = ts; |
1464 | 68 | GST_DEBUG_OBJECT (dec, "base_ts now %" GST_TIME_FORMAT, GST_TIME_ARGS (ts)); |
1465 | 68 | } |
1466 | | |
1467 | | /* still no valid ts, track the segment one */ |
1468 | 68 | if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts)) && |
1469 | 0 | dec->output_segment.rate > 0.0) { |
1470 | 0 | priv->base_ts = dec->output_segment.start; |
1471 | 0 | } |
1472 | | |
1473 | | /* only check for resync at the beginning of an input/output frame */ |
1474 | 68 | do_check_resync = !is_subframe || priv->subframe_samples == 0; |
1475 | | |
1476 | | /* slightly convoluted approach caters for perfect ts if subclass desires. */ |
1477 | 68 | if (do_check_resync && GST_CLOCK_TIME_IS_VALID (ts)) { |
1478 | 68 | if (dec->priv->tolerance > 0) { |
1479 | 0 | GstClockTimeDiff diff; |
1480 | |
|
1481 | 0 | g_assert (GST_CLOCK_TIME_IS_VALID (priv->base_ts)); |
1482 | 0 | next_ts = priv->base_ts + |
1483 | 0 | gst_util_uint64_scale (priv->samples, GST_SECOND, ctx->info.rate); |
1484 | 0 | GST_LOG_OBJECT (dec, |
1485 | 0 | "buffer is %" G_GUINT64_FORMAT " samples past base_ts %" |
1486 | 0 | GST_TIME_FORMAT ", expected ts %" GST_TIME_FORMAT, priv->samples, |
1487 | 0 | GST_TIME_ARGS (priv->base_ts), GST_TIME_ARGS (next_ts)); |
1488 | 0 | diff = GST_CLOCK_DIFF (next_ts, ts); |
1489 | 0 | GST_LOG_OBJECT (dec, "ts diff %d ms", (gint) (diff / GST_MSECOND)); |
1490 | | /* if within tolerance, |
1491 | | * discard buffer ts and carry on producing perfect stream, |
1492 | | * otherwise resync to ts */ |
1493 | 0 | if (G_UNLIKELY (diff < (gint64) - dec->priv->tolerance || |
1494 | 0 | diff > (gint64) dec->priv->tolerance || |
1495 | 0 | GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_RESYNC))) { |
1496 | 0 | GST_DEBUG_OBJECT (dec, "base_ts resync"); |
1497 | 0 | priv->base_ts = ts; |
1498 | 0 | priv->samples = 0; |
1499 | 0 | } |
1500 | 68 | } else { |
1501 | 68 | GST_DEBUG_OBJECT (dec, "base_ts resync"); |
1502 | 68 | priv->base_ts = ts; |
1503 | 68 | priv->samples = 0; |
1504 | 68 | } |
1505 | 68 | } |
1506 | | |
1507 | | /* delayed one-shot stuff until confirmed data */ |
1508 | 68 | if (priv->taglist && priv->taglist_changed) { |
1509 | 68 | GstEvent *tags_event; |
1510 | | |
1511 | 68 | tags_event = gst_audio_decoder_create_merged_tags_event (dec); |
1512 | | |
1513 | 68 | if (tags_event != NULL) |
1514 | 68 | gst_audio_decoder_push_event (dec, tags_event); |
1515 | | |
1516 | 68 | priv->taglist_changed = FALSE; |
1517 | 68 | } |
1518 | | |
1519 | 68 | if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) { |
1520 | 68 | GST_BUFFER_PTS (buf) = |
1521 | 68 | priv->base_ts + |
1522 | 68 | GST_FRAMES_TO_CLOCK_TIME (priv->samples, ctx->info.rate); |
1523 | 68 | GST_BUFFER_DURATION (buf) = priv->base_ts + |
1524 | 68 | GST_FRAMES_TO_CLOCK_TIME (priv->samples + samples, ctx->info.rate) - |
1525 | 68 | GST_BUFFER_PTS (buf); |
1526 | 68 | } else { |
1527 | 0 | GST_BUFFER_PTS (buf) = GST_CLOCK_TIME_NONE; |
1528 | 0 | GST_BUFFER_DURATION (buf) = |
1529 | 0 | GST_FRAMES_TO_CLOCK_TIME (samples, ctx->info.rate); |
1530 | 0 | } |
1531 | | |
1532 | 68 | if (klass->transform_meta) { |
1533 | 68 | if (inbufs.length) { |
1534 | 68 | GList *l; |
1535 | 136 | for (l = inbufs.head; l; l = l->next) { |
1536 | 68 | CopyMetaData data; |
1537 | | |
1538 | 68 | data.decoder = dec; |
1539 | 68 | data.outbuf = buf; |
1540 | 68 | gst_buffer_foreach_meta (l->data, foreach_metadata, &data); |
1541 | 68 | } |
1542 | 68 | } else if (is_subframe) { |
1543 | 0 | CopyMetaData data; |
1544 | 0 | GstBuffer *in_buf; |
1545 | | |
1546 | | /* For subframes we assume a 1:N relationship for now, so we just take |
1547 | | * metas from the first pending input buf */ |
1548 | 0 | in_buf = g_queue_peek_head (&priv->frames); |
1549 | 0 | data.decoder = dec; |
1550 | 0 | data.outbuf = buf; |
1551 | 0 | gst_buffer_foreach_meta (in_buf, foreach_metadata, &data); |
1552 | 0 | } else { |
1553 | 0 | GST_WARNING_OBJECT (dec, |
1554 | 0 | "Can't copy metadata because input buffers disappeared"); |
1555 | 0 | } |
1556 | 68 | } |
1557 | | |
1558 | 68 | GST_OBJECT_LOCK (dec); |
1559 | 68 | priv->samples += samples; |
1560 | 68 | priv->samples_out += samples; |
1561 | 68 | GST_OBJECT_UNLOCK (dec); |
1562 | | |
1563 | | /* we got data, so note things are looking up */ |
1564 | 68 | if (G_UNLIKELY (dec->priv->error_count)) |
1565 | 0 | dec->priv->error_count = 0; |
1566 | | |
1567 | 68 | ret = gst_audio_decoder_output (dec, buf); |
1568 | | |
1569 | 375 | exit: |
1570 | 375 | g_queue_foreach (&inbufs, (GFunc) gst_buffer_unref, NULL); |
1571 | 375 | g_queue_clear (&inbufs); |
1572 | | |
1573 | 375 | if (is_subframe) |
1574 | 0 | dec->priv->subframe_samples += samples; |
1575 | 375 | else |
1576 | 375 | dec->priv->subframe_samples = 0; |
1577 | | |
1578 | 375 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
1579 | | |
1580 | 375 | return ret; |
1581 | | |
1582 | | /* ERRORS */ |
1583 | 0 | wrong_buffer: |
1584 | 0 | { |
1585 | | /* arguably more of a programming error? */ |
1586 | 0 | GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), |
1587 | 0 | ("buffer size %" G_GSIZE_FORMAT " not a multiple of %d", size, |
1588 | 0 | ctx->info.bpf)); |
1589 | 0 | gst_buffer_unref (buf); |
1590 | 0 | ret = GST_FLOW_ERROR; |
1591 | 0 | goto exit; |
1592 | 68 | } |
1593 | 0 | wrong_samples: |
1594 | 0 | { |
1595 | | /* arguably more of a programming error? */ |
1596 | 0 | GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), |
1597 | 0 | ("GstAudioMeta samples (%" G_GSIZE_FORMAT ") are inconsistent with " |
1598 | 0 | "the buffer size and layout (size/bpf = %" G_GSIZE_FORMAT ")", |
1599 | 0 | meta->samples, size / ctx->info.bpf)); |
1600 | 0 | gst_buffer_unref (buf); |
1601 | 0 | ret = GST_FLOW_ERROR; |
1602 | 0 | goto exit; |
1603 | 68 | } |
1604 | 0 | subframe_without_pending_input_frame: |
1605 | 0 | { |
1606 | | /* arguably more of a programming error? */ |
1607 | 0 | GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), |
1608 | 0 | ("Received decoded subframe, but no pending frame")); |
1609 | 0 | gst_buffer_unref (buf); |
1610 | 0 | ret = GST_FLOW_ERROR; |
1611 | 0 | goto exit; |
1612 | 68 | } |
1613 | 68 | } |
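/* The frames == 0 path above is what the public
 * gst_audio_decoder_finish_subframe() ends up in.  A rough sketch, assuming
 * a hypothetical subclass that decodes one input frame into several output
 * chunks: push all but the last chunk as sub-frames, then close the input
 * frame with gst_audio_decoder_finish_frame().  my_dec_push_chunks and the
 * chunks array are placeholders. */
static GstFlowReturn
my_dec_push_chunks (GstAudioDecoder * dec, GstBuffer ** chunks, guint n_chunks)
{
  GstFlowReturn ret = GST_FLOW_OK;
  guint i;

  /* sub-frames keep the current input frame pending in the base class */
  for (i = 0; i + 1 < n_chunks && ret == GST_FLOW_OK; i++)
    ret = gst_audio_decoder_finish_subframe (dec, chunks[i]);

  /* the last chunk consumes the pending input frame (frames == 1) */
  if (ret == GST_FLOW_OK)
    ret = gst_audio_decoder_finish_frame (dec, chunks[n_chunks - 1], 1);

  return ret;
}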
1614 | | |
1615 | | static GstFlowReturn |
1616 | | gst_audio_decoder_handle_frame (GstAudioDecoder * dec, |
1617 | | GstAudioDecoderClass * klass, GstBuffer * buffer) |
1618 | 699 | { |
1619 | | /* Skip decoding and send a GAP instead if |
1620 | | * GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO is set and we have timestamps |
1621 | | * FIXME: We only do this for forward playback atm, because reverse |
1622 | | * playback would require accumulating GAP events and pushing them |
1623 | | * out in reverse order as for normal audio samples |
1624 | | */ |
1625 | 699 | if (G_UNLIKELY (dec->input_segment.rate > 0.0 |
1626 | 699 | && dec->input_segment.flags & GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO)) { |
1627 | 0 | if (buffer) { |
1628 | 0 | GstClockTime ts = GST_BUFFER_PTS (buffer); |
1629 | 0 | if (GST_CLOCK_TIME_IS_VALID (ts)) { |
1630 | 0 | GstEvent *event = gst_event_new_gap (ts, GST_BUFFER_DURATION (buffer)); |
1631 | |
|
1632 | 0 | gst_buffer_unref (buffer); |
1633 | 0 | GST_LOG_OBJECT (dec, "Skipping decode in trickmode and sending gap"); |
1634 | 0 | gst_audio_decoder_handle_gap (dec, event); |
1635 | 0 | return GST_FLOW_OK; |
1636 | 0 | } |
1637 | 0 | } |
1638 | 0 | } |
1639 | | |
1640 | 699 | if (G_LIKELY (buffer)) { |
1641 | 376 | gsize size = gst_buffer_get_size (buffer); |
1642 | | /* keep around for admin */ |
1643 | 376 | GST_LOG_OBJECT (dec, |
1644 | 376 | "tracking frame size %" G_GSIZE_FORMAT ", ts %" GST_TIME_FORMAT, size, |
1645 | 376 | GST_TIME_ARGS (GST_BUFFER_PTS (buffer))); |
1646 | 376 | g_queue_push_tail (&dec->priv->frames, buffer); |
1647 | 376 | dec->priv->ctx.delay = dec->priv->frames.length; |
1648 | 376 | GST_OBJECT_LOCK (dec); |
1649 | 376 | dec->priv->bytes_in += size; |
1650 | 376 | GST_OBJECT_UNLOCK (dec); |
1651 | 376 | } else { |
1652 | 323 | GST_LOG_OBJECT (dec, "providing subclass with NULL frame"); |
1653 | 323 | } |
1654 | | |
1655 | 699 | return klass->handle_frame (dec, buffer); |
1656 | 699 | } |
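/* The trick-mode branch above triggers when the input segment carries
 * GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO, which an application usually requests
 * with a seek.  A sketch, assuming a hypothetical helper; pipeline and rate
 * are whatever the application uses. */
static gboolean
request_no_audio_trickmode (GstElement * pipeline, gdouble rate)
{
  return gst_element_seek (pipeline, rate, GST_FORMAT_TIME,
      GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_TRICKMODE |
      GST_SEEK_FLAG_TRICKMODE_NO_AUDIO,
      GST_SEEK_TYPE_SET, 0, GST_SEEK_TYPE_NONE, -1);
}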
1657 | | |
1658 | | /* could be made subclass-configurable instead, but this already allows for a |
1659 | | * whole lot of raw samples, so at least quite some encoded data ... */ |
1660 | 0 | #define GST_AUDIO_DECODER_MAX_SYNC 10 * 8 * 2 * 1024 |
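/* i.e. 10 * 8 * 2 * 1024 = 163840 bytes of skipped input before parsing is given up on */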
1661 | | |
1662 | | static GstFlowReturn |
1663 | | gst_audio_decoder_push_buffers (GstAudioDecoder * dec, gboolean force) |
1664 | 699 | { |
1665 | 699 | GstAudioDecoderClass *klass; |
1666 | 699 | GstAudioDecoderPrivate *priv; |
1667 | 699 | GstAudioDecoderContext *ctx; |
1668 | 699 | GstFlowReturn ret = GST_FLOW_OK; |
1669 | 699 | GstBuffer *buffer; |
1670 | 699 | gint av, flush; |
1671 | | |
1672 | 699 | klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
1673 | 699 | priv = dec->priv; |
1674 | 699 | ctx = &dec->priv->ctx; |
1675 | | |
1676 | 699 | g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR); |
1677 | | |
1678 | 699 | av = gst_adapter_available (priv->adapter); |
1679 | 699 | GST_DEBUG_OBJECT (dec, "available: %d", av); |
1680 | | |
1681 | 1.07k | while (ret == GST_FLOW_OK) { |
1682 | | |
1683 | 1.02k | flush = 0; |
1684 | 1.02k | ctx->eos = force; |
1685 | | |
1686 | 1.02k | if (G_LIKELY (av)) { |
1687 | 376 | gint len; |
1688 | 376 | GstClockTime ts; |
1689 | 376 | guint64 distance; |
1690 | | |
1691 | | /* parse if needed */ |
1692 | 376 | if (klass->parse) { |
1693 | 0 | gint offset = 0; |
1694 | | |
1695 | | /* limited (legacy) parsing; avoid whole of baseparse */ |
1696 | 0 | GST_DEBUG_OBJECT (dec, "parsing available: %d", av); |
1697 | | /* piggyback sync state on discont */ |
1698 | 0 | ctx->sync = !priv->discont; |
1699 | 0 | ret = klass->parse (dec, priv->adapter, &offset, &len); |
1700 | |
|
1701 | 0 | g_assert (offset <= av); |
1702 | 0 | if (offset) { |
1703 | | /* jumped a bit */ |
1704 | 0 | GST_DEBUG_OBJECT (dec, "skipped %d; setting DISCONT", offset); |
1705 | 0 | gst_adapter_flush (priv->adapter, offset); |
1706 | 0 | flush = offset; |
1707 | | /* avoid parsing indefinitely */ |
1708 | 0 | priv->sync_flush += offset; |
1709 | 0 | if (priv->sync_flush > GST_AUDIO_DECODER_MAX_SYNC) |
1710 | 0 | goto parse_failed; |
1711 | 0 | } |
1712 | | |
1713 | 0 | if (ret == GST_FLOW_EOS) { |
1714 | 0 | GST_LOG_OBJECT (dec, "no frame yet"); |
1715 | 0 | ret = GST_FLOW_OK; |
1716 | 0 | break; |
1717 | 0 | } else if (ret == GST_FLOW_OK) { |
1718 | 0 | GST_LOG_OBJECT (dec, "frame at offset %d of length %d", offset, len); |
1719 | 0 | g_assert (len); |
1720 | 0 | g_assert (offset + len <= av); |
1721 | 0 | priv->sync_flush = 0; |
1722 | 0 | } else { |
1723 | 0 | break; |
1724 | 0 | } |
1725 | 376 | } else { |
1726 | 376 | len = av; |
1727 | 376 | } |
1728 | | /* track upstream ts, but do not get stuck if nothing new upstream */ |
1729 | 376 | ts = gst_adapter_prev_pts (priv->adapter, &distance); |
1730 | 376 | if (ts != priv->prev_ts || distance <= priv->prev_distance) { |
1731 | 376 | priv->prev_ts = ts; |
1732 | 376 | priv->prev_distance = distance; |
1733 | 376 | } else { |
1734 | 0 | GST_LOG_OBJECT (dec, "ts == prev_ts; discarding"); |
1735 | 0 | ts = GST_CLOCK_TIME_NONE; |
1736 | 0 | } |
1737 | 376 | buffer = gst_adapter_take_buffer (priv->adapter, len); |
1738 | 376 | buffer = gst_buffer_make_writable (buffer); |
1739 | 376 | GST_BUFFER_PTS (buffer) = ts; |
1740 | 376 | flush += len; |
1741 | 376 | priv->force = FALSE; |
1742 | 646 | } else { |
1743 | 646 | if (!force) |
1744 | 323 | break; |
1745 | 323 | if (!priv->drainable) { |
1746 | 0 | priv->drained = TRUE; |
1747 | 0 | break; |
1748 | 0 | } |
1749 | 323 | buffer = NULL; |
1750 | 323 | priv->force = TRUE; |
1751 | 323 | } |
1752 | | |
1753 | 699 | ret = gst_audio_decoder_handle_frame (dec, klass, buffer); |
1754 | | |
1755 | | /* we just handed the subclass a NULL (drain) frame; do not keep pushing it ... */ |
1756 | 699 | if (G_UNLIKELY (!av)) { |
1757 | 323 | priv->drained = TRUE; |
1758 | 323 | break; |
1759 | 323 | } |
1760 | | |
1761 | 376 | av -= flush; |
1762 | 376 | g_assert (av >= 0); |
1763 | 376 | } |
1764 | | |
1765 | 699 | GST_LOG_OBJECT (dec, "done pushing to subclass"); |
1766 | 699 | return ret; |
1767 | | |
1768 | | /* ERRORS */ |
1769 | 0 | parse_failed: |
1770 | 0 | { |
1771 | 0 | GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("failed to parse stream")); |
1772 | 0 | return GST_FLOW_ERROR; |
1773 | 699 | } |
1774 | 699 | } |
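/* A rough sketch of what the optional ::parse vfunc invoked above is
 * expected to do, assuming a hypothetical subclass with a fixed frame size
 * and a 12-bit sync pattern (both placeholders): report bytes to skip via
 * *offset, the frame length via *length, return GST_FLOW_EOS while more
 * data is needed and GST_FLOW_OK once a complete frame is available. */
static GstFlowReturn
my_dec_parse (GstAudioDecoder * dec, GstAdapter * adapter,
    gint * offset, gint * length)
{
  const guint8 *data;
  gsize avail = gst_adapter_available (adapter);
  gsize i;

  if (avail < MY_DEC_FRAME_SIZE)
    return GST_FLOW_EOS;        /* not even one frame's worth of data yet */

  data = gst_adapter_map (adapter, avail);
  for (i = 0; i + MY_DEC_FRAME_SIZE <= avail; i++) {
    if (data[i] == 0xFF && (data[i + 1] & 0xF0) == 0xF0) {
      gst_adapter_unmap (adapter);
      *offset = i;              /* junk to flush before the frame starts */
      *length = MY_DEC_FRAME_SIZE;
      return GST_FLOW_OK;
    }
  }
  gst_adapter_unmap (adapter);

  return GST_FLOW_EOS;          /* no sync word found, wait for more data */
}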
1775 | | |
1776 | | static GstFlowReturn |
1777 | | gst_audio_decoder_drain (GstAudioDecoder * dec) |
1778 | 750 | { |
1779 | 750 | GstFlowReturn ret; |
1780 | | |
1781 | 750 | if (dec->priv->drained && !dec->priv->gather) |
1782 | 427 | return GST_FLOW_OK; |
1783 | | |
1784 | | /* Apply any pending events before draining, as that |
1785 | | * may update the pending segment info */ |
1786 | 323 | apply_pending_events (dec); |
1787 | | |
1788 | | /* dispatch reverse pending buffers */ |
1789 | | /* chain eventually calls drain as well, but by that time the |
1790 | | * gather list should be clear, so that is fine ... */ |
1791 | 323 | if (dec->output_segment.rate < 0.0 && dec->priv->gather) |
1792 | 0 | gst_audio_decoder_chain_reverse (dec, NULL); |
1793 | | /* have subclass give all it can */ |
1794 | 323 | ret = gst_audio_decoder_push_buffers (dec, TRUE); |
1795 | 323 | if (ret != GST_FLOW_OK) { |
1796 | 0 | GST_WARNING_OBJECT (dec, "audio decoder push buffers failed"); |
1797 | 0 | goto drain_failed; |
1798 | 0 | } |
1799 | | /* ensure all output sent */ |
1800 | 323 | ret = gst_audio_decoder_output (dec, NULL); |
1801 | 323 | if (ret != GST_FLOW_OK) |
1802 | 323 | GST_WARNING_OBJECT (dec, "audio decoder output failed"); |
1803 | | |
1804 | 323 | drain_failed: |
1805 | | /* everything should be away now */ |
1806 | 323 | if (dec->priv->frames.length) { |
1807 | | /* not fatal, and not impossible, if the subclass/codec swallows data */ |
1808 | 0 | GST_WARNING_OBJECT (dec, "still %d frames left after draining", |
1809 | 0 | dec->priv->frames.length); |
1810 | 0 | g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL); |
1811 | 0 | g_queue_clear (&dec->priv->frames); |
1812 | 0 | } |
1813 | | |
1814 | | /* discard (unparsed) leftover */ |
1815 | 323 | gst_adapter_clear (dec->priv->adapter); |
1816 | 323 | return ret; |
1817 | 323 | } |
1818 | | |
1819 | | /* hard == FLUSH, otherwise discont */ |
1820 | | static GstFlowReturn |
1821 | | gst_audio_decoder_flush (GstAudioDecoder * dec, gboolean hard) |
1822 | 553 | { |
1823 | 553 | GstAudioDecoderClass *klass; |
1824 | 553 | GstFlowReturn ret = GST_FLOW_OK; |
1825 | | |
1826 | 553 | klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
1827 | | |
1828 | 553 | GST_LOG_OBJECT (dec, "flush hard %d", hard); |
1829 | | |
1830 | 553 | if (!hard) { |
1831 | 553 | ret = gst_audio_decoder_drain (dec); |
1832 | 553 | } else { |
1833 | 0 | gst_audio_decoder_clear_queues (dec); |
1834 | 0 | gst_segment_init (&dec->input_segment, GST_FORMAT_TIME); |
1835 | 0 | gst_segment_init (&dec->output_segment, GST_FORMAT_TIME); |
1836 | 0 | dec->priv->error_count = 0; |
1837 | 0 | } |
1838 | | /* only bother the subclass with flushing if it is known to be up and |
1839 | | * running and has already produced output */ |
1840 | 553 | if (klass->flush && dec->priv->samples_out > 0) |
1841 | 0 | klass->flush (dec, hard); |
1842 | | /* and get (re)set for the sequel */ |
1843 | 553 | gst_audio_decoder_reset (dec, FALSE); |
1844 | | |
1845 | 553 | return ret; |
1846 | 553 | } |
1847 | | |
1848 | | static GstFlowReturn |
1849 | | gst_audio_decoder_chain_forward (GstAudioDecoder * dec, GstBuffer * buffer) |
1850 | 420 | { |
1851 | 420 | GstFlowReturn ret = GST_FLOW_OK; |
1852 | | |
1853 | | /* discard this degenerate case, though the ts might still be of value ?? */ |
1854 | 420 | if (G_UNLIKELY (gst_buffer_get_size (buffer) == 0)) { |
1855 | 44 | GST_DEBUG_OBJECT (dec, "discarding empty buffer"); |
1856 | 44 | gst_buffer_unref (buffer); |
1857 | 44 | goto exit; |
1858 | 44 | } |
1859 | | |
1860 | | /* grab buffer */ |
1861 | 376 | gst_adapter_push (dec->priv->adapter, buffer); |
1862 | 376 | buffer = NULL; |
1863 | | /* new stuff, so we can push subclass again */ |
1864 | 376 | dec->priv->drained = FALSE; |
1865 | | |
1866 | | /* hand to subclass */ |
1867 | 376 | ret = gst_audio_decoder_push_buffers (dec, FALSE); |
1868 | | |
1869 | 420 | exit: |
1870 | 420 | GST_LOG_OBJECT (dec, "chain-done"); |
1871 | 420 | return ret; |
1872 | 376 | } |
1873 | | |
1874 | | static void |
1875 | | gst_audio_decoder_clear_queues (GstAudioDecoder * dec) |
1876 | 258 | { |
1877 | 258 | GstAudioDecoderPrivate *priv = dec->priv; |
1878 | | |
1879 | 258 | g_list_foreach (priv->queued, (GFunc) gst_mini_object_unref, NULL); |
1880 | 258 | g_list_free (priv->queued); |
1881 | 258 | priv->queued = NULL; |
1882 | 258 | g_list_foreach (priv->gather, (GFunc) gst_mini_object_unref, NULL); |
1883 | 258 | g_list_free (priv->gather); |
1884 | 258 | priv->gather = NULL; |
1885 | 258 | g_list_foreach (priv->decode, (GFunc) gst_mini_object_unref, NULL); |
1886 | 258 | g_list_free (priv->decode); |
1887 | 258 | priv->decode = NULL; |
1888 | 258 | } |
1889 | | |
1890 | | /* |
1891 | | * Input: |
1892 | | * Buffer decoding order: 7 8 9 4 5 6 3 1 2 EOS |
1893 | | * Discont flag: D D D D |
1894 | | * |
1895 | | * - Each Discont marks a discont in the decoding order. |
1896 | | * |
1897 | | * for vorbis, a buffer can only be fully decoded once we have the previous |
1898 | | * buffer. This means that to decode buffer 7, we need buffer 6, which |
1899 | | * arrives out of order. |
1900 | | * |
1901 | | * we first gather buffers in the gather queue until we get a DISCONT. We |
1902 | | * prepend each incoming buffer so that they are in reversed order. |
1903 | | * |
1904 | | * gather queue: 9 8 7 |
1905 | | * decode queue: |
1906 | | * output queue: |
1907 | | * |
1908 | | * When a DISCONT is received (buffer 4), we move the gather queue to the |
1909 | | * decode queue. This is simply done by taking the head of the gather queue |
1910 | | * and prepending it to the decode queue. This yields: |
1911 | | * |
1912 | | * gather queue: |
1913 | | * decode queue: 7 8 9 |
1914 | | * output queue: |
1915 | | * |
1916 | | * Then we decode each buffer in the decode queue in order and put the output |
1917 | | * buffer in the output queue. The first buffer (7) will not produce any output |
1918 | | * because it needs the previous buffer (6) which did not arrive yet. This |
1919 | | * yields: |
1920 | | * |
1921 | | * gather queue: |
1922 | | * decode queue: 7 8 9 |
1923 | | * output queue: 9 8 |
1924 | | * |
1925 | | * Then we remove the consumed buffers from the decode queue. Buffer 7 is not |
1926 | | * completely consumed, we need to keep it around for when we receive buffer |
1927 | | * 6. This yields: |
1928 | | * |
1929 | | * gather queue: |
1930 | | * decode queue: 7 |
1931 | | * output queue: 9 8 |
1932 | | * |
1933 | | * Then we accumulate more buffers: |
1934 | | * |
1935 | | * gather queue: 6 5 4 |
1936 | | * decode queue: 7 |
1937 | | * output queue: |
1938 | | * |
1939 | | * prepending to the decode queue on DISCONT yields: |
1940 | | * |
1941 | | * gather queue: |
1942 | | * decode queue: 4 5 6 7 |
1943 | | * output queue: |
1944 | | * |
1945 | | * after decoding and keeping buffer 4: |
1946 | | * |
1947 | | * gather queue: |
1948 | | * decode queue: 4 |
1949 | | * output queue: 7 6 5 |
1950 | | * |
1951 | | * Etc.. |
1952 | | */ |
1953 | | static GstFlowReturn |
1954 | | gst_audio_decoder_flush_decode (GstAudioDecoder * dec) |
1955 | 0 | { |
1956 | 0 | GstAudioDecoderPrivate *priv = dec->priv; |
1957 | 0 | GstFlowReturn res = GST_FLOW_OK; |
1958 | 0 | GstClockTime timestamp; |
1959 | 0 | GList *walk; |
1960 | |
|
1961 | 0 | walk = priv->decode; |
1962 | |
|
1963 | 0 | GST_DEBUG_OBJECT (dec, "flushing buffers to decoder"); |
1964 | | |
1965 | | /* clear buffer and decoder state */ |
1966 | 0 | gst_audio_decoder_flush (dec, FALSE); |
1967 | |
|
1968 | 0 | while (walk) { |
1969 | 0 | GList *next; |
1970 | 0 | GstBuffer *buf = GST_BUFFER_CAST (walk->data); |
1971 | |
|
1972 | 0 | GST_DEBUG_OBJECT (dec, "decoding buffer %p, ts %" GST_TIME_FORMAT, |
1973 | 0 | buf, GST_TIME_ARGS (GST_BUFFER_PTS (buf))); |
1974 | |
|
1975 | 0 | next = g_list_next (walk); |
1976 | | /* decode buffer, resulting data prepended to output queue */ |
1977 | 0 | gst_buffer_ref (buf); |
1978 | 0 | res = gst_audio_decoder_chain_forward (dec, buf); |
1979 | | |
1980 | | /* if we generated output, we can discard the buffer, else we |
1981 | | * keep it in the queue */ |
1982 | 0 | if (priv->queued) { |
1983 | 0 | GST_DEBUG_OBJECT (dec, "decoded buffer to %p", priv->queued->data); |
1984 | 0 | priv->decode = g_list_delete_link (priv->decode, walk); |
1985 | 0 | gst_buffer_unref (buf); |
1986 | 0 | } else { |
1987 | 0 | GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping"); |
1988 | 0 | } |
1989 | 0 | walk = next; |
1990 | 0 | } |
1991 | | |
1992 | | /* drain any aggregation (or otherwise) leftover */ |
1993 | 0 | gst_audio_decoder_drain (dec); |
1994 | | |
1995 | | /* now send queued data downstream */ |
1996 | 0 | timestamp = GST_CLOCK_TIME_NONE; |
1997 | 0 | while (priv->queued) { |
1998 | 0 | GstBuffer *buf = GST_BUFFER_CAST (priv->queued->data); |
1999 | 0 | GstClockTime duration; |
2000 | |
|
2001 | 0 | duration = GST_BUFFER_DURATION (buf); |
2002 | | |
2003 | | /* duration should always be valid for raw audio */ |
2004 | 0 | g_assert (GST_CLOCK_TIME_IS_VALID (duration)); |
2005 | | |
2006 | | /* interpolate (backward) if needed */ |
2007 | 0 | if (G_LIKELY (timestamp != -1)) { |
2008 | 0 | if (timestamp > duration) |
2009 | 0 | timestamp -= duration; |
2010 | 0 | else |
2011 | 0 | timestamp = 0; |
2012 | 0 | } |
2013 | |
|
2014 | 0 | if (!GST_BUFFER_PTS_IS_VALID (buf)) { |
2015 | 0 | GST_LOG_OBJECT (dec, "applying reverse interpolated ts %" |
2016 | 0 | GST_TIME_FORMAT, GST_TIME_ARGS (timestamp)); |
2017 | 0 | GST_BUFFER_PTS (buf) = timestamp; |
2018 | 0 | } else { |
2019 | | /* track otherwise */ |
2020 | 0 | timestamp = GST_BUFFER_PTS (buf); |
2021 | 0 | GST_LOG_OBJECT (dec, "tracking ts %" GST_TIME_FORMAT, |
2022 | 0 | GST_TIME_ARGS (timestamp)); |
2023 | 0 | } |
2024 | |
|
2025 | 0 | if (G_LIKELY (res == GST_FLOW_OK)) { |
2026 | 0 | GST_DEBUG_OBJECT (dec, "pushing buffer %p of size %" G_GSIZE_FORMAT ", " |
2027 | 0 | "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf, |
2028 | 0 | gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_PTS (buf)), |
2029 | 0 | GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); |
2030 | | /* should be already, but let's be sure */ |
2031 | 0 | buf = gst_buffer_make_writable (buf); |
2032 | | /* avoid stray DISCONT from forward processing, |
2033 | | * which has no meaning in reverse pushing */ |
2034 | 0 | GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT); |
2035 | 0 | res = gst_audio_decoder_push_forward (dec, buf); |
2036 | 0 | } else { |
2037 | 0 | gst_buffer_unref (buf); |
2038 | 0 | } |
2039 | |
|
2040 | 0 | priv->queued = g_list_delete_link (priv->queued, priv->queued); |
2041 | 0 | } |
2042 | | |
2043 | 0 | return res; |
2044 | 0 | } |
2045 | | |
2046 | | static GstFlowReturn |
2047 | | gst_audio_decoder_chain_reverse (GstAudioDecoder * dec, GstBuffer * buf) |
2048 | 0 | { |
2049 | 0 | GstAudioDecoderPrivate *priv = dec->priv; |
2050 | 0 | GstFlowReturn result = GST_FLOW_OK; |
2051 | | |
2052 | | /* if we have a discont, move buffers to the decode list */ |
2053 | 0 | if (!buf || GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT)) { |
2054 | 0 | GST_DEBUG_OBJECT (dec, "received discont"); |
2055 | 0 | while (priv->gather) { |
2056 | 0 | GstBuffer *gbuf; |
2057 | |
|
2058 | 0 | gbuf = GST_BUFFER_CAST (priv->gather->data); |
2059 | | /* remove from the gather list */ |
2060 | 0 | priv->gather = g_list_delete_link (priv->gather, priv->gather); |
2061 | | /* copy to decode queue */ |
2062 | 0 | priv->decode = g_list_prepend (priv->decode, gbuf); |
2063 | 0 | } |
2064 | | /* decode stuff in the decode queue */ |
2065 | 0 | gst_audio_decoder_flush_decode (dec); |
2066 | 0 | } |
2067 | |
|
2068 | 0 | if (G_LIKELY (buf)) { |
2069 | 0 | GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", " |
2070 | 0 | "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf, |
2071 | 0 | gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_PTS (buf)), |
2072 | 0 | GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); |
2073 | | |
2074 | | /* add buffer to gather queue */ |
2075 | 0 | priv->gather = g_list_prepend (priv->gather, buf); |
2076 | 0 | } |
2077 | |
|
2078 | 0 | return result; |
2079 | 0 | } |
2080 | | |
2081 | | static GstFlowReturn |
2082 | | gst_audio_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer) |
2083 | 420 | { |
2084 | 420 | GstAudioDecoder *dec; |
2085 | 420 | GstFlowReturn ret; |
2086 | | |
2087 | 420 | dec = GST_AUDIO_DECODER (parent); |
2088 | | |
2089 | 420 | GST_LOG_OBJECT (dec, |
2090 | 420 | "received buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT |
2091 | 420 | ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buffer), |
2092 | 420 | GST_TIME_ARGS (GST_BUFFER_PTS (buffer)), |
2093 | 420 | GST_TIME_ARGS (GST_BUFFER_DURATION (buffer))); |
2094 | | |
2095 | 420 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
2096 | | |
2097 | 420 | if (G_UNLIKELY (dec->priv->ctx.input_caps == NULL && dec->priv->needs_format)) |
2098 | 0 | goto not_negotiated; |
2099 | | |
2100 | 420 | dec->priv->ctx.had_input_data = TRUE; |
2101 | | |
2102 | 420 | if (!dec->priv->expecting_discont_buf && |
2103 | 420 | GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) { |
2104 | 386 | gint64 samples, ts; |
2105 | | |
2106 | | /* track present position */ |
2107 | 386 | ts = dec->priv->base_ts; |
2108 | 386 | samples = dec->priv->samples; |
2109 | | |
2110 | 386 | GST_DEBUG_OBJECT (dec, "handling discont"); |
2111 | 386 | gst_audio_decoder_flush (dec, FALSE); |
2112 | 386 | dec->priv->discont = TRUE; |
2113 | | |
2114 | | /* the buffer may claim DISCONT loudly, but if it can't tell us where we are now, |
2115 | | * we'll stick to where we were ... |
2116 | | * Particularly useful/needed for upstream BYTE based formats */ |
2117 | 386 | if (dec->input_segment.rate > 0.0 && !GST_BUFFER_PTS_IS_VALID (buffer)) { |
2118 | 235 | GST_DEBUG_OBJECT (dec, "... but restoring previous ts tracking"); |
2119 | 235 | dec->priv->base_ts = ts; |
2120 | 235 | dec->priv->samples = samples; |
2121 | 235 | } |
2122 | 386 | } |
2123 | 420 | dec->priv->expecting_discont_buf = FALSE; |
2124 | | |
2125 | 420 | if (dec->input_segment.rate > 0.0) |
2126 | 420 | ret = gst_audio_decoder_chain_forward (dec, buffer); |
2127 | 0 | else |
2128 | 0 | ret = gst_audio_decoder_chain_reverse (dec, buffer); |
2129 | | |
2130 | 420 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2131 | | |
2132 | 420 | return ret; |
2133 | | |
2134 | | /* ERRORS */ |
2135 | 0 | not_negotiated: |
2136 | 0 | { |
2137 | 0 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2138 | 0 | GST_ELEMENT_ERROR (dec, CORE, NEGOTIATION, (NULL), |
2139 | 0 | ("decoder not initialized")); |
2140 | 0 | gst_buffer_unref (buffer); |
2141 | 0 | return GST_FLOW_NOT_NEGOTIATED; |
2142 | 420 | } |
2143 | 420 | } |
2144 | | |
2145 | | /* perform upstream byte <-> time conversion (duration, seeking) |
2146 | | * if subclass allows and if enough data for moderately decent conversion */ |
2147 | | static inline gboolean |
2148 | | gst_audio_decoder_do_byte (GstAudioDecoder * dec) |
2149 | 2 | { |
2150 | 2 | gboolean ret; |
2151 | | |
2152 | 2 | GST_OBJECT_LOCK (dec); |
2153 | 2 | ret = dec->priv->ctx.do_estimate_rate && dec->priv->ctx.info.bpf && |
2154 | 0 | dec->priv->ctx.info.rate <= dec->priv->samples_out; |
2155 | 2 | GST_OBJECT_UNLOCK (dec); |
2156 | | |
2157 | 2 | return ret; |
2158 | 2 | } |
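/* The do_estimate_rate flag tested above is opt-in for the subclass.  A
 * sketch, assuming a decoder for which a bytes <-> time estimate is
 * acceptable (e.g. roughly constant bitrate); my_cbr_dec_start is a
 * placeholder name. */
static gboolean
my_cbr_dec_start (GstAudioDecoder * dec)
{
  /* let the base class estimate duration and seek targets from byte counts */
  gst_audio_decoder_set_estimate_rate (dec, TRUE);
  return TRUE;
}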
2159 | | |
2160 | | /* Must be called holding the GST_AUDIO_DECODER_STREAM_LOCK */ |
2161 | | static gboolean |
2162 | | gst_audio_decoder_negotiate_default_caps (GstAudioDecoder * dec) |
2163 | 0 | { |
2164 | 0 | GstCaps *caps, *templcaps; |
2165 | 0 | gint i; |
2166 | 0 | gint channels = 0; |
2167 | 0 | gint rate; |
2168 | 0 | guint64 channel_mask = 0; |
2169 | 0 | gint caps_size; |
2170 | 0 | GstStructure *structure; |
2171 | 0 | GstAudioInfo info; |
2172 | |
|
2173 | 0 | templcaps = gst_pad_get_pad_template_caps (dec->srcpad); |
2174 | 0 | caps = gst_pad_peer_query_caps (dec->srcpad, templcaps); |
2175 | 0 | if (caps) |
2176 | 0 | gst_caps_unref (templcaps); |
2177 | 0 | else |
2178 | 0 | caps = templcaps; |
2179 | 0 | templcaps = NULL; |
2180 | |
|
2181 | 0 | if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps)) |
2182 | 0 | goto caps_error; |
2183 | | |
2184 | 0 | GST_LOG_OBJECT (dec, "peer caps %" GST_PTR_FORMAT, caps); |
2185 | | |
2186 | | /* before fixating, try to use whatever upstream provided */ |
2187 | 0 | caps = gst_caps_make_writable (caps); |
2188 | 0 | caps_size = gst_caps_get_size (caps); |
2189 | 0 | if (dec->priv->ctx.input_caps) { |
2190 | 0 | GstCaps *sinkcaps = dec->priv->ctx.input_caps; |
2191 | 0 | GstStructure *structure = gst_caps_get_structure (sinkcaps, 0); |
2192 | |
|
2193 | 0 | if (gst_structure_get_int (structure, "rate", &rate)) { |
2194 | 0 | for (i = 0; i < caps_size; i++) { |
2195 | 0 | gst_structure_set (gst_caps_get_structure (caps, i), "rate", |
2196 | 0 | G_TYPE_INT, rate, NULL); |
2197 | 0 | } |
2198 | 0 | } |
2199 | |
|
2200 | 0 | if (gst_structure_get_int (structure, "channels", &channels)) { |
2201 | 0 | for (i = 0; i < caps_size; i++) { |
2202 | 0 | gst_structure_set (gst_caps_get_structure (caps, i), "channels", |
2203 | 0 | G_TYPE_INT, channels, NULL); |
2204 | 0 | } |
2205 | 0 | } |
2206 | |
|
2207 | 0 | if (gst_structure_get (structure, "channel-mask", GST_TYPE_BITMASK, |
2208 | 0 | &channel_mask, NULL)) { |
2209 | 0 | for (i = 0; i < caps_size; i++) { |
2210 | 0 | gst_structure_set (gst_caps_get_structure (caps, i), "channel-mask", |
2211 | 0 | GST_TYPE_BITMASK, channel_mask, NULL); |
2212 | 0 | } |
2213 | 0 | } |
2214 | 0 | } |
2215 | |
|
2216 | 0 | for (i = 0; i < caps_size; i++) { |
2217 | 0 | structure = gst_caps_get_structure (caps, i); |
2218 | 0 | if (gst_structure_has_field (structure, "channels")) |
2219 | 0 | gst_structure_fixate_field_nearest_int (structure, |
2220 | 0 | "channels", GST_AUDIO_DEF_CHANNELS); |
2221 | 0 | else |
2222 | 0 | gst_structure_set (structure, "channels", G_TYPE_INT, |
2223 | 0 | GST_AUDIO_DEF_CHANNELS, NULL); |
2224 | 0 | if (gst_structure_has_field (structure, "rate")) |
2225 | 0 | gst_structure_fixate_field_nearest_int (structure, |
2226 | 0 | "rate", GST_AUDIO_DEF_RATE); |
2227 | 0 | else |
2228 | 0 | gst_structure_set (structure, "rate", G_TYPE_INT, GST_AUDIO_DEF_RATE, |
2229 | 0 | NULL); |
2230 | 0 | } |
2231 | 0 | caps = gst_caps_fixate (caps); |
2232 | 0 | structure = gst_caps_get_structure (caps, 0); |
2233 | | |
2234 | | /* Need to add a channel-mask if channels > 2 */ |
2235 | 0 | gst_structure_get_int (structure, "channels", &channels); |
2236 | 0 | if (channels > 2 && !gst_structure_has_field (structure, "channel-mask")) { |
2237 | 0 | channel_mask = gst_audio_channel_get_fallback_mask (channels); |
2238 | 0 | if (channel_mask != 0) { |
2239 | 0 | gst_structure_set (structure, "channel-mask", |
2240 | 0 | GST_TYPE_BITMASK, channel_mask, NULL); |
2241 | 0 | } else { |
2242 | 0 | GST_WARNING_OBJECT (dec, "No default channel-mask for %d channels", |
2243 | 0 | channels); |
2244 | 0 | } |
2245 | 0 | } |
2246 | |
|
2247 | 0 | if (!caps || !gst_audio_info_from_caps (&info, caps)) |
2248 | 0 | goto caps_error; |
2249 | | |
2250 | 0 | GST_OBJECT_LOCK (dec); |
2251 | 0 | dec->priv->ctx.info = info; |
2252 | 0 | dec->priv->ctx.caps = caps; |
2253 | 0 | GST_OBJECT_UNLOCK (dec); |
2254 | |
|
2255 | 0 | GST_INFO_OBJECT (dec, |
2256 | 0 | "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps); |
2257 | |
|
2258 | 0 | return TRUE; |
2259 | | |
2260 | 0 | caps_error: |
2261 | 0 | { |
2262 | 0 | if (caps) |
2263 | 0 | gst_caps_unref (caps); |
2264 | 0 | return FALSE; |
2265 | 0 | } |
2266 | 0 | } |
2267 | | |
2268 | | static gboolean |
2269 | | gst_audio_decoder_handle_gap (GstAudioDecoder * dec, GstEvent * event) |
2270 | 0 | { |
2271 | 0 | gboolean ret; |
2272 | 0 | GstClockTime timestamp, duration; |
2273 | 0 | gboolean needs_reconfigure = FALSE; |
2274 | | |
2275 | | /* Ensure we have caps first */ |
2276 | 0 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
2277 | 0 | if (!GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info)) { |
2278 | 0 | if (!gst_audio_decoder_negotiate_default_caps (dec)) { |
2279 | 0 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2280 | 0 | GST_ELEMENT_ERROR (dec, STREAM, FORMAT, (NULL), |
2281 | 0 | ("Decoder output not negotiated before GAP event.")); |
2282 | 0 | gst_event_unref (event); |
2283 | 0 | return FALSE; |
2284 | 0 | } |
2285 | 0 | needs_reconfigure = TRUE; |
2286 | 0 | } |
2287 | 0 | needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad) |
2288 | 0 | || needs_reconfigure; |
2289 | 0 | if (G_UNLIKELY (dec->priv->ctx.output_format_changed || needs_reconfigure)) { |
2290 | 0 | if (!gst_audio_decoder_negotiate_unlocked (dec)) { |
2291 | 0 | GST_WARNING_OBJECT (dec, "Failed to negotiate with downstream"); |
2292 | 0 | gst_pad_mark_reconfigure (dec->srcpad); |
2293 | 0 | } |
2294 | 0 | } |
2295 | 0 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2296 | |
|
2297 | 0 | gst_event_parse_gap (event, ×tamp, &duration); |
2298 | | |
2299 | | /* time progressed without data, see if we can fill the gap with |
2300 | | * some concealment data */ |
2301 | 0 | GST_DEBUG_OBJECT (dec, |
2302 | 0 | "gap event: plc %d, do_plc %d, position %" GST_TIME_FORMAT |
2303 | 0 | " duration %" GST_TIME_FORMAT, |
2304 | 0 | dec->priv->plc, dec->priv->ctx.do_plc, |
2305 | 0 | GST_TIME_ARGS (timestamp), GST_TIME_ARGS (duration)); |
2306 | |
|
2307 | 0 | if (dec->priv->plc && dec->priv->ctx.do_plc && dec->input_segment.rate > 0.0) { |
2308 | 0 | GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
2309 | 0 | GstBuffer *buf; |
2310 | | |
2311 | | /* hand subclass empty frame with duration that needs covering */ |
2312 | 0 | buf = gst_buffer_new (); |
2313 | 0 | GST_BUFFER_PTS (buf) = timestamp; |
2314 | 0 | GST_BUFFER_DURATION (buf) = duration; |
2315 | | /* best effort, not much error handling */ |
2316 | 0 | gst_audio_decoder_handle_frame (dec, klass, buf); |
2317 | 0 | ret = TRUE; |
2318 | 0 | dec->priv->expecting_discont_buf = TRUE; |
2319 | 0 | gst_event_unref (event); |
2320 | 0 | } else { |
2321 | 0 | GstFlowReturn flowret; |
2322 | | |
2323 | | /* sub-class doesn't know how to handle empty buffers, |
2324 | | * so just try sending GAP downstream */ |
2325 | 0 | flowret = check_pending_reconfigure (dec); |
2326 | 0 | if (flowret == GST_FLOW_OK) { |
2327 | 0 | send_pending_events (dec); |
2328 | 0 | ret = gst_audio_decoder_push_event (dec, event); |
2329 | 0 | } else { |
2330 | 0 | ret = FALSE; |
2331 | 0 | gst_event_unref (event); |
2332 | 0 | } |
2333 | 0 | } |
2334 | 0 | return ret; |
2335 | 0 | } |
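/* The concealment branch above is only taken when the subclass has declared
 * PLC support and the "plc" property is enabled.  A sketch, assuming a
 * hypothetical PLC-capable subclass; my_plc_dec_* and my_plc_dec_conceal()
 * are placeholders. */
static gboolean
my_plc_dec_start (GstAudioDecoder * dec)
{
  /* advertise that this decoder can synthesize data for gaps */
  gst_audio_decoder_set_plc_aware (dec, TRUE);
  return TRUE;
}

static GstFlowReturn
my_plc_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * buf)
{
  /* an empty buffer with a valid duration is a concealment request */
  if (buf != NULL && gst_buffer_get_size (buf) == 0)
    return my_plc_dec_conceal (dec, GST_BUFFER_DURATION (buf));

  /* ... normal decoding path ... */
  return GST_FLOW_OK;
}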
2336 | | |
2337 | | static GList * |
2338 | | _flush_events (GstPad * pad, GList * events) |
2339 | 0 | { |
2340 | 0 | GList *tmp; |
2341 | |
|
2342 | 0 | for (tmp = events; tmp; tmp = tmp->next) { |
2343 | 0 | if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS && |
2344 | 0 | GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT && |
2345 | 0 | GST_EVENT_IS_STICKY (tmp->data)) { |
2346 | 0 | gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data)); |
2347 | 0 | } |
2348 | 0 | gst_event_unref (tmp->data); |
2349 | 0 | } |
2350 | 0 | g_list_free (events); |
2351 | |
|
2352 | 0 | return NULL; |
2353 | 0 | } |
2354 | | |
2355 | | static gboolean |
2356 | | gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, GstEvent * event) |
2357 | 602 | { |
2358 | 602 | gboolean ret; |
2359 | | |
2360 | 602 | switch (GST_EVENT_TYPE (event)) { |
2361 | 167 | case GST_EVENT_STREAM_START: |
2362 | 167 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
2363 | | /* finish any data in current segment and clear the decoder |
2364 | | * to be ready for new stream data */ |
2365 | 167 | gst_audio_decoder_drain (dec); |
2366 | 167 | gst_audio_decoder_flush (dec, FALSE); |
2367 | | |
2368 | 167 | GST_DEBUG_OBJECT (dec, "received STREAM_START. Clearing taglist"); |
2369 | | /* Flush upstream tags after a STREAM_START */ |
2370 | 167 | if (dec->priv->upstream_tags) { |
2371 | 0 | gst_tag_list_unref (dec->priv->upstream_tags); |
2372 | 0 | dec->priv->upstream_tags = NULL; |
2373 | 0 | dec->priv->taglist_changed = TRUE; |
2374 | 0 | } |
2375 | 167 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2376 | | |
2377 | 167 | ret = gst_audio_decoder_push_event (dec, event); |
2378 | 167 | break; |
2379 | 83 | case GST_EVENT_SEGMENT: |
2380 | 83 | { |
2381 | 83 | GstSegment seg; |
2382 | 83 | GstFormat format; |
2383 | | |
2384 | 83 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
2385 | 83 | gst_event_copy_segment (event, &seg); |
2386 | | |
2387 | 83 | format = seg.format; |
2388 | 83 | if (format == GST_FORMAT_TIME) { |
2389 | 83 | GST_DEBUG_OBJECT (dec, "received TIME SEGMENT %" GST_SEGMENT_FORMAT, |
2390 | 83 | &seg); |
2391 | 83 | } else { |
2392 | 0 | gint64 nstart; |
2393 | 0 | GST_DEBUG_OBJECT (dec, "received SEGMENT %" GST_SEGMENT_FORMAT, &seg); |
2394 | | /* handle newsegment resulting from legacy simple seeking */ |
2395 | | /* note that we need to convert this whether or not there is enough data |
2396 | | * to handle the initial newsegment */ |
2397 | 0 | if (dec->priv->ctx.do_estimate_rate && |
2398 | 0 | gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, seg.start, |
2399 | 0 | GST_FORMAT_TIME, &nstart)) { |
2400 | | /* best attempt convert */ |
2401 | | /* as these are only estimates, stop is kept open-ended to avoid |
2402 | | * premature cutting */ |
2403 | 0 | GST_DEBUG_OBJECT (dec, "converted to TIME start %" GST_TIME_FORMAT, |
2404 | 0 | GST_TIME_ARGS (nstart)); |
2405 | 0 | seg.format = GST_FORMAT_TIME; |
2406 | 0 | seg.start = nstart; |
2407 | 0 | seg.time = nstart; |
2408 | 0 | seg.stop = GST_CLOCK_TIME_NONE; |
2409 | | /* replace event */ |
2410 | 0 | gst_event_unref (event); |
2411 | 0 | event = gst_event_new_segment (&seg); |
2412 | 0 | } else { |
2413 | 0 | GST_DEBUG_OBJECT (dec, "unsupported format; ignoring"); |
2414 | 0 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2415 | 0 | gst_event_unref (event); |
2416 | 0 | ret = FALSE; |
2417 | 0 | break; |
2418 | 0 | } |
2419 | 0 | } |
2420 | | |
2421 | | /* prepare for next segment */ |
2422 | | /* Use the segment start as a base timestamp |
2423 | | * in case upstream does not come up with anything better |
2424 | | * (e.g. upstream BYTE) */ |
2425 | 83 | if (format != GST_FORMAT_TIME) { |
2426 | 0 | dec->priv->base_ts = seg.start; |
2427 | 0 | dec->priv->samples = 0; |
2428 | 0 | } |
2429 | | |
2430 | | /* Update the decode flags in the segment if we have an instant-rate |
2431 | | * override active */ |
2432 | 83 | GST_OBJECT_LOCK (dec); |
2433 | 83 | if (dec->priv->decode_flags_override) { |
2434 | 0 | seg.flags &= ~GST_SEGMENT_INSTANT_FLAGS; |
2435 | 0 | seg.flags |= dec->priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS; |
2436 | 0 | } |
2437 | | |
2438 | | /* and follow along with segment */ |
2439 | 83 | dec->priv->in_out_segment_sync = FALSE; |
2440 | 83 | dec->input_segment = seg; |
2441 | 83 | GST_OBJECT_UNLOCK (dec); |
2442 | | |
2443 | 83 | dec->priv->pending_events = |
2444 | 83 | g_list_append (dec->priv->pending_events, event); |
2445 | 83 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2446 | | |
2447 | 83 | ret = TRUE; |
2448 | 83 | break; |
2449 | 83 | } |
2450 | 0 | case GST_EVENT_INSTANT_RATE_CHANGE: |
2451 | 0 | { |
2452 | 0 | GstSegmentFlags flags; |
2453 | 0 | GstSegment *seg; |
2454 | |
|
2455 | 0 | gst_event_parse_instant_rate_change (event, NULL, &flags); |
2456 | |
|
2457 | 0 | GST_OBJECT_LOCK (dec); |
2458 | 0 | dec->priv->decode_flags_override = TRUE; |
2459 | 0 | dec->priv->decode_flags = flags; |
2460 | | |
2461 | | /* Update the input segment flags */ |
2462 | 0 | seg = &dec->input_segment; |
2463 | 0 | seg->flags &= ~GST_SEGMENT_INSTANT_FLAGS; |
2464 | 0 | seg->flags |= dec->priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS; |
2465 | 0 | GST_OBJECT_UNLOCK (dec); |
2466 | | |
2467 | | /* Forward downstream */ |
2468 | 0 | ret = gst_pad_event_default (dec->sinkpad, GST_OBJECT_CAST (dec), event); |
2469 | 0 | break; |
2470 | 83 | } |
2471 | 0 | case GST_EVENT_GAP: |
2472 | 0 | ret = gst_audio_decoder_handle_gap (dec, event); |
2473 | 0 | break; |
2474 | 0 | case GST_EVENT_FLUSH_STOP: |
2475 | 0 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
2476 | | /* prepare for fresh start */ |
2477 | 0 | gst_audio_decoder_flush (dec, TRUE); |
2478 | |
|
2479 | 0 | dec->priv->pending_events = _flush_events (dec->srcpad, |
2480 | 0 | dec->priv->pending_events); |
2481 | 0 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2482 | | |
2483 | | /* Forward FLUSH_STOP, it is expected to be forwarded immediately |
2484 | | * and no buffers are queued anyway. */ |
2485 | 0 | ret = gst_audio_decoder_push_event (dec, event); |
2486 | 0 | break; |
2487 | | |
2488 | 0 | case GST_EVENT_SEGMENT_DONE: |
2489 | 0 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
2490 | 0 | gst_audio_decoder_drain (dec); |
2491 | 0 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2492 | | |
2493 | | /* Forward SEGMENT_DONE because no buffer or serialized event might come after |
2494 | | * SEGMENT_DONE and nothing could trigger another _finish_frame() call. */ |
2495 | 0 | if (dec->priv->pending_events) |
2496 | 0 | send_pending_events (dec); |
2497 | 0 | ret = gst_audio_decoder_push_event (dec, event); |
2498 | 0 | break; |
2499 | | |
2500 | 30 | case GST_EVENT_EOS: |
2501 | 30 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
2502 | 30 | gst_audio_decoder_drain (dec); |
2503 | 30 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2504 | | |
2505 | 30 | if (dec->priv->ctx.had_input_data && !dec->priv->ctx.had_output_data) { |
2506 | 13 | GST_ELEMENT_ERROR (dec, STREAM, DECODE, |
2507 | 13 | ("No valid frames decoded before end of stream"), |
2508 | 13 | ("no valid frames found")); |
2509 | 13 | } |
2510 | | |
2511 | | /* Forward EOS because no buffer or serialized event will come after |
2512 | | * EOS and nothing could trigger another _finish_frame() call. */ |
2513 | 30 | if (dec->priv->pending_events) |
2514 | 13 | send_pending_events (dec); |
2515 | 30 | ret = gst_audio_decoder_push_event (dec, event); |
2516 | 30 | break; |
2517 | | |
2518 | 167 | case GST_EVENT_CAPS: |
2519 | 167 | { |
2520 | 167 | GstCaps *caps; |
2521 | | |
2522 | 167 | gst_event_parse_caps (event, &caps); |
2523 | 167 | ret = gst_audio_decoder_sink_setcaps (dec, caps); |
2524 | 167 | gst_event_unref (event); |
2525 | 167 | break; |
2526 | 83 | } |
2527 | 155 | case GST_EVENT_TAG: |
2528 | 155 | { |
2529 | 155 | GstTagList *tags; |
2530 | | |
2531 | 155 | gst_event_parse_tag (event, &tags); |
2532 | | |
2533 | 155 | if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) { |
2534 | 72 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
2535 | 72 | if (dec->priv->upstream_tags != tags) { |
2536 | 72 | if (dec->priv->upstream_tags) |
2537 | 0 | gst_tag_list_unref (dec->priv->upstream_tags); |
2538 | 72 | dec->priv->upstream_tags = gst_tag_list_ref (tags); |
2539 | 72 | GST_INFO_OBJECT (dec, "upstream stream tags: %" GST_PTR_FORMAT, tags); |
2540 | 72 | } |
2541 | 72 | gst_event_unref (event); |
2542 | 72 | event = gst_audio_decoder_create_merged_tags_event (dec); |
2543 | 72 | dec->priv->taglist_changed = FALSE; |
2544 | 72 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2545 | | |
2546 | | /* No tags, go out of here instead of falling through */ |
2547 | 72 | if (!event) { |
2548 | 0 | ret = TRUE; |
2549 | 0 | break; |
2550 | 0 | } |
2551 | 72 | } |
2552 | 155 | } |
2553 | | /* FALLTHROUGH */ |
2554 | 155 | default: |
2555 | 155 | if (!GST_EVENT_IS_SERIALIZED (event)) { |
2556 | 0 | ret = |
2557 | 0 | gst_pad_event_default (dec->sinkpad, GST_OBJECT_CAST (dec), event); |
2558 | 155 | } else { |
2559 | 155 | GST_DEBUG_OBJECT (dec, "Enqueuing event %d, %s", GST_EVENT_TYPE (event), |
2560 | 155 | GST_EVENT_TYPE_NAME (event)); |
2561 | 155 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
2562 | 155 | dec->priv->pending_events = |
2563 | 155 | g_list_append (dec->priv->pending_events, event); |
2564 | 155 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
2565 | 155 | ret = TRUE; |
2566 | 155 | } |
2567 | 155 | break; |
2568 | 602 | } |
2569 | 602 | return ret; |
2570 | 602 | } |
2571 | | |
2572 | | static gboolean |
2573 | | gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent, |
2574 | | GstEvent * event) |
2575 | 602 | { |
2576 | 602 | GstAudioDecoder *dec; |
2577 | 602 | GstAudioDecoderClass *klass; |
2578 | 602 | gboolean ret; |
2579 | | |
2580 | 602 | dec = GST_AUDIO_DECODER (parent); |
2581 | 602 | klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
2582 | | |
2583 | 602 | GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event), |
2584 | 602 | GST_EVENT_TYPE_NAME (event)); |
2585 | | |
2586 | 602 | if (klass->sink_event) |
2587 | 602 | ret = klass->sink_event (dec, event); |
2588 | 0 | else { |
2589 | 0 | gst_event_unref (event); |
2590 | 0 | ret = FALSE; |
2591 | 0 | } |
2592 | 602 | return ret; |
2593 | 602 | } |
2594 | | |
2595 | | static gboolean |
2596 | | gst_audio_decoder_do_seek (GstAudioDecoder * dec, GstEvent * event) |
2597 | 0 | { |
2598 | 0 | GstSeekFlags flags; |
2599 | 0 | GstSeekType start_type, end_type; |
2600 | 0 | GstFormat format; |
2601 | 0 | gdouble rate; |
2602 | 0 | gint64 start, start_time, end_time; |
2603 | 0 | GstSegment seek_segment; |
2604 | 0 | guint32 seqnum; |
2605 | |
|
2606 | 0 | gst_event_parse_seek (event, &rate, &format, &flags, &start_type, |
2607 | 0 | &start_time, &end_type, &end_time); |
2608 | | |
2609 | | /* we'll handle plain open-ended flushing seeks with the simple approach */ |
2610 | 0 | if (rate != 1.0) { |
2611 | 0 | GST_DEBUG_OBJECT (dec, "unsupported seek: rate"); |
2612 | 0 | return FALSE; |
2613 | 0 | } |
2614 | | |
2615 | 0 | if (start_type != GST_SEEK_TYPE_SET) { |
2616 | 0 | GST_DEBUG_OBJECT (dec, "unsupported seek: start time"); |
2617 | 0 | return FALSE; |
2618 | 0 | } |
2619 | | |
2620 | 0 | if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) || |
2621 | 0 | (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) { |
2622 | 0 | GST_DEBUG_OBJECT (dec, "unsupported seek: end time"); |
2623 | 0 | return FALSE; |
2624 | 0 | } |
2625 | | |
2626 | 0 | if (!(flags & GST_SEEK_FLAG_FLUSH)) { |
2627 | 0 | GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing"); |
2628 | 0 | return FALSE; |
2629 | 0 | } |
2630 | | |
2631 | 0 | memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment)); |
2632 | 0 | gst_segment_do_seek (&seek_segment, rate, format, flags, start_type, |
2633 | 0 | start_time, end_type, end_time, NULL); |
2634 | 0 | start_time = seek_segment.position; |
2635 | |
|
2636 | 0 | if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time, |
2637 | 0 | GST_FORMAT_BYTES, &start)) { |
2638 | 0 | GST_DEBUG_OBJECT (dec, "conversion failed"); |
2639 | 0 | return FALSE; |
2640 | 0 | } |
2641 | | |
2642 | 0 | seqnum = gst_event_get_seqnum (event); |
2643 | 0 | event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags, |
2644 | 0 | GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1); |
2645 | 0 | gst_event_set_seqnum (event, seqnum); |
2646 | |
|
2647 | 0 | GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %" |
2648 | 0 | G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start); |
2649 | |
|
2650 | 0 | return gst_pad_push_event (dec->sinkpad, event); |
2651 | 0 | } |
2652 | | |
2653 | | static gboolean |
2654 | | gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec, GstEvent * event) |
2655 | 38 | { |
2656 | 38 | gboolean res; |
2657 | | |
2658 | 38 | switch (GST_EVENT_TYPE (event)) { |
2659 | 0 | case GST_EVENT_SEEK: |
2660 | 0 | { |
2661 | 0 | GstFormat format; |
2662 | 0 | gdouble rate; |
2663 | 0 | GstSeekFlags flags; |
2664 | 0 | GstSeekType start_type, stop_type; |
2665 | 0 | gint64 start, stop; |
2666 | 0 | gint64 tstart, tstop; |
2667 | 0 | guint32 seqnum; |
2668 | |
|
2669 | 0 | gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start, |
2670 | 0 | &stop_type, &stop); |
2671 | 0 | seqnum = gst_event_get_seqnum (event); |
2672 | | |
2673 | | /* upstream gets a chance first */ |
2674 | 0 | if ((res = gst_pad_push_event (dec->sinkpad, event))) |
2675 | 0 | break; |
2676 | | |
2677 | | /* if upstream fails for a time seek, maybe we can help if allowed */ |
2678 | 0 | if (format == GST_FORMAT_TIME) { |
2679 | 0 | if (gst_audio_decoder_do_byte (dec)) |
2680 | 0 | res = gst_audio_decoder_do_seek (dec, event); |
2681 | 0 | break; |
2682 | 0 | } |
2683 | | |
2684 | | /* ... though a non-time seek can be aided as well */ |
2685 | | /* First bring the requested format to time */ |
2686 | 0 | if (!(res = |
2687 | 0 | gst_pad_query_convert (dec->srcpad, format, start, |
2688 | 0 | GST_FORMAT_TIME, &tstart))) |
2689 | 0 | goto convert_error; |
2690 | 0 | if (!(res = |
2691 | 0 | gst_pad_query_convert (dec->srcpad, format, stop, GST_FORMAT_TIME, |
2692 | 0 | &tstop))) |
2693 | 0 | goto convert_error; |
2694 | | |
2695 | | /* then seek with time on the peer */ |
2696 | 0 | event = gst_event_new_seek (rate, GST_FORMAT_TIME, |
2697 | 0 | flags, start_type, tstart, stop_type, tstop); |
2698 | 0 | gst_event_set_seqnum (event, seqnum); |
2699 | |
|
2700 | 0 | res = gst_pad_push_event (dec->sinkpad, event); |
2701 | 0 | break; |
2702 | 0 | } |
2703 | 38 | default: |
2704 | 38 | res = gst_pad_event_default (dec->srcpad, GST_OBJECT_CAST (dec), event); |
2705 | 38 | break; |
2706 | 38 | } |
2707 | 38 | done: |
2708 | 38 | return res; |
2709 | | |
2710 | | /* ERRORS */ |
2711 | 0 | convert_error: |
2712 | 0 | { |
2713 | 0 | GST_DEBUG_OBJECT (dec, "cannot convert start/stop for seek"); |
2714 | 0 | goto done; |
2715 | 38 | } |
2716 | 38 | } |
2717 | | |
2718 | | static gboolean |
2719 | | gst_audio_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event) |
2720 | 38 | { |
2721 | 38 | GstAudioDecoder *dec; |
2722 | 38 | GstAudioDecoderClass *klass; |
2723 | 38 | gboolean ret; |
2724 | | |
2725 | 38 | dec = GST_AUDIO_DECODER (parent); |
2726 | 38 | klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
2727 | | |
2728 | 38 | GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event), |
2729 | 38 | GST_EVENT_TYPE_NAME (event)); |
2730 | | |
2731 | 38 | if (klass->src_event) |
2732 | 38 | ret = klass->src_event (dec, event); |
2733 | 0 | else { |
2734 | 0 | gst_event_unref (event); |
2735 | 0 | ret = FALSE; |
2736 | 0 | } |
2737 | | |
2738 | 38 | return ret; |
2739 | 38 | } |
2740 | | |
2741 | | static gboolean |
2742 | | gst_audio_decoder_decide_allocation_default (GstAudioDecoder * dec, |
2743 | | GstQuery * query) |
2744 | 85 | { |
2745 | 85 | GstAllocator *allocator = NULL; |
2746 | 85 | GstAllocationParams params; |
2747 | 85 | gboolean update_allocator; |
2748 | | |
2749 | | /* we got configuration from our peer or the decide_allocation method, |
2750 | | * parse it */ |
2751 | 85 | if (gst_query_get_n_allocation_params (query) > 0) { |
2752 | | /* try the allocator */ |
2753 | 0 | gst_query_parse_nth_allocation_param (query, 0, &allocator, ¶ms); |
2754 | 0 | update_allocator = TRUE; |
2755 | 85 | } else { |
2756 | 85 | allocator = NULL; |
2757 | 85 | gst_allocation_params_init (¶ms); |
2758 | 85 | update_allocator = FALSE; |
2759 | 85 | } |
2760 | | |
2761 | 85 | if (update_allocator) |
2762 | 0 | gst_query_set_nth_allocation_param (query, 0, allocator, ¶ms); |
2763 | 85 | else |
2764 | 85 | gst_query_add_allocation_param (query, allocator, ¶ms); |
2765 | 85 | if (allocator) |
2766 | 0 | gst_object_unref (allocator); |
2767 | | |
2768 | 85 | return TRUE; |
2769 | 85 | } |
2770 | | |
2771 | | static gboolean |
2772 | | gst_audio_decoder_propose_allocation_default (GstAudioDecoder * dec, |
2773 | | GstQuery * query) |
2774 | 0 | { |
2775 | 0 | return TRUE; |
2776 | 0 | } |
2777 | | |
2778 | | /** |
2779 | | * gst_audio_decoder_proxy_getcaps: |
2780 | | * @decoder: a #GstAudioDecoder |
2781 | | * @caps: (nullable): initial caps |
2782 | | * @filter: (nullable): filter caps |
2783 | | * |
2784 | | * Returns caps that express @caps (or sink template caps if @caps == NULL) |
2785 | | * restricted to rate/channels/... combinations supported by downstream |
2786 | | * elements. |
2787 | | * |
2788 | | * Returns: (transfer full): a #GstCaps owned by caller |
2789 | | * |
2790 | | * Since: 1.6 |
2791 | | */ |
2792 | | GstCaps * |
2793 | | gst_audio_decoder_proxy_getcaps (GstAudioDecoder * decoder, GstCaps * caps, |
2794 | | GstCaps * filter) |
2795 | 0 | { |
2796 | 0 | return __gst_audio_element_proxy_getcaps (GST_ELEMENT_CAST (decoder), |
2797 | 0 | GST_AUDIO_DECODER_SINK_PAD (decoder), |
2798 | 0 | GST_AUDIO_DECODER_SRC_PAD (decoder), caps, filter); |
2799 | 0 | } |
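/* A sketch of how a subclass would typically use the helper above from its
 * ::getcaps vfunc, assuming a hypothetical my_dec_getcaps; passing NULL lets
 * the helper start from the sink pad template caps. */
static GstCaps *
my_dec_getcaps (GstAudioDecoder * dec, GstCaps * filter)
{
  return gst_audio_decoder_proxy_getcaps (dec, NULL, filter);
}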
2800 | | |
2801 | | static GstCaps * |
2802 | | gst_audio_decoder_sink_getcaps (GstAudioDecoder * decoder, GstCaps * filter) |
2803 | 0 | { |
2804 | 0 | GstAudioDecoderClass *klass; |
2805 | 0 | GstCaps *caps; |
2806 | |
|
2807 | 0 | klass = GST_AUDIO_DECODER_GET_CLASS (decoder); |
2808 | |
|
2809 | 0 | if (klass->getcaps) |
2810 | 0 | caps = klass->getcaps (decoder, filter); |
2811 | 0 | else |
2812 | 0 | caps = gst_audio_decoder_proxy_getcaps (decoder, NULL, filter); |
2813 | |
|
2814 | 0 | GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps); |
2815 | |
|
2816 | 0 | return caps; |
2817 | 0 | } |
2818 | | |
2819 | | static gboolean |
2820 | | gst_audio_decoder_sink_query_default (GstAudioDecoder * dec, GstQuery * query) |
2821 | 336 | { |
2822 | 336 | GstPad *pad = GST_AUDIO_DECODER_SINK_PAD (dec); |
2823 | 336 | gboolean res = FALSE; |
2824 | | |
2825 | 336 | GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query); |
2826 | | |
2827 | 336 | switch (GST_QUERY_TYPE (query)) { |
2828 | 0 | case GST_QUERY_FORMATS: |
2829 | 0 | { |
2830 | 0 | gst_query_set_formats (query, 2, GST_FORMAT_TIME, GST_FORMAT_BYTES); |
2831 | 0 | res = TRUE; |
2832 | 0 | break; |
2833 | 0 | } |
2834 | 0 | case GST_QUERY_CONVERT: |
2835 | 0 | { |
2836 | 0 | GstFormat src_fmt, dest_fmt; |
2837 | 0 | gint64 src_val, dest_val; |
2838 | |
|
2839 | 0 | gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); |
2840 | 0 | GST_OBJECT_LOCK (dec); |
2841 | 0 | res = __gst_audio_encoded_audio_convert (&dec->priv->ctx.info, |
2842 | 0 | dec->priv->bytes_in, dec->priv->samples_out, |
2843 | 0 | src_fmt, src_val, &dest_fmt, &dest_val); |
2844 | 0 | GST_OBJECT_UNLOCK (dec); |
2845 | 0 | if (!res) |
2846 | 0 | goto error; |
2847 | 0 | gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); |
2848 | 0 | break; |
2849 | 0 | } |
2850 | 0 | case GST_QUERY_ALLOCATION: |
2851 | 0 | { |
2852 | 0 | GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
2853 | |
2854 | 0 | if (klass->propose_allocation) |
2855 | 0 | res = klass->propose_allocation (dec, query); |
2856 | 0 | break; |
2857 | 0 | } |
2858 | 0 | case GST_QUERY_CAPS:{ |
2859 | 0 | GstCaps *filter, *caps; |
2860 | |
2861 | 0 | gst_query_parse_caps (query, &filter); |
2862 | 0 | caps = gst_audio_decoder_sink_getcaps (dec, filter); |
2863 | 0 | gst_query_set_caps_result (query, caps); |
2864 | 0 | gst_caps_unref (caps); |
2865 | 0 | res = TRUE; |
2866 | 0 | break; |
2867 | 0 | } |
2868 | 336 | case GST_QUERY_ACCEPT_CAPS:{ |
2869 | 336 | if (dec->priv->use_default_pad_acceptcaps) { |
2870 | 336 | res = |
2871 | 336 | gst_pad_query_default (GST_AUDIO_DECODER_SINK_PAD (dec), |
2872 | 336 | GST_OBJECT_CAST (dec), query); |
2873 | 336 | } else { |
2874 | 0 | GstCaps *caps; |
2875 | 0 | GstCaps *allowed_caps; |
2876 | 0 | GstCaps *template_caps; |
2877 | 0 | gboolean accept; |
2878 | |
2879 | 0 | gst_query_parse_accept_caps (query, &caps); |
2880 | |
2881 | 0 | template_caps = gst_pad_get_pad_template_caps (pad); |
2882 | 0 | accept = gst_caps_is_subset (caps, template_caps); |
2883 | 0 | gst_caps_unref (template_caps); |
2884 | |
2885 | 0 | if (accept) { |
2886 | 0 | allowed_caps = gst_pad_query_caps (GST_AUDIO_DECODER_SINK_PAD (dec), |
2887 | 0 | caps); |
2888 | |
2889 | 0 | accept = gst_caps_can_intersect (caps, allowed_caps); |
2890 | |
2891 | 0 | gst_caps_unref (allowed_caps); |
2892 | 0 | } |
2893 | |
2894 | 0 | gst_query_set_accept_caps_result (query, accept); |
2895 | 0 | res = TRUE; |
2896 | 0 | } |
2897 | 336 | break; |
2898 | 0 | } |
2899 | 0 | case GST_QUERY_SEEKING: |
2900 | 0 | { |
2901 | 0 | GstFormat format; |
2902 | | |
2903 | | /* non-TIME segments are discarded, so we won't seek that way either */ |
2904 | 0 | gst_query_parse_seeking (query, &format, NULL, NULL, NULL); |
2905 | 0 | if (format != GST_FORMAT_TIME) { |
2906 | 0 | GST_DEBUG_OBJECT (dec, "discarding non-TIME SEEKING query"); |
2907 | 0 | res = FALSE; |
2908 | 0 | break; |
2909 | 0 | } |
2910 | 0 | } |
2911 | | /* FALLTHROUGH */ |
2912 | 0 | default: |
2913 | 0 | res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query); |
2914 | 0 | break; |
2915 | 336 | } |
2916 | | |
2917 | 336 | error: |
2918 | 336 | return res; |
2919 | 336 | } |
2920 | | |
2921 | | static gboolean |
2922 | | gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent, |
2923 | | GstQuery * query) |
2924 | 336 | { |
2925 | 336 | GstAudioDecoderClass *dec_class; |
2926 | 336 | GstAudioDecoder *dec; |
2927 | 336 | gboolean ret = FALSE; |
2928 | | |
2929 | 336 | dec = GST_AUDIO_DECODER (parent); |
2930 | 336 | dec_class = GST_AUDIO_DECODER_GET_CLASS (dec); |
2931 | | |
2932 | 336 | GST_DEBUG_OBJECT (pad, "received query %" GST_PTR_FORMAT, query); |
2933 | | |
2934 | 336 | if (dec_class->sink_query) |
2935 | 336 | ret = dec_class->sink_query (dec, query); |
2936 | | |
2937 | 336 | return ret; |
2938 | 336 | } |
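Because the wrapper above simply dispatches to the ::sink_query vfunc, a subclass override should handle its own cases and defer everything else to the parent class implementation; a minimal sketch with hypothetical names follows.

/* Sketch: defer unknown sink queries to the GstAudioDecoder default handler. */
static gboolean
gst_my_dec_sink_query (GstAudioDecoder * dec, GstQuery * query)
{
  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_CUSTOM:
      /* ... subclass-specific handling would go here ... */
      return TRUE;
    default:
      return GST_AUDIO_DECODER_CLASS (parent_class)->sink_query (dec, query);
  }
}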
2939 | | |
2940 | | /* FIXME ? are any of these queries (other than latency) a decoder's business ?? |
2941 | | * also, the conversion stuff might seem to make sense, but seems to not mind |
2942 | | * segment stuff etc at all |
2943 | | * Supposedly that's backward compatibility ... */ |
2944 | | static gboolean |
2945 | | gst_audio_decoder_src_query_default (GstAudioDecoder * dec, GstQuery * query) |
2946 | 178 | { |
2947 | 178 | GstPad *pad = GST_AUDIO_DECODER_SRC_PAD (dec); |
2948 | 178 | gboolean res = FALSE; |
2949 | | |
2950 | 178 | GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query); |
2951 | | |
2952 | 178 | switch (GST_QUERY_TYPE (query)) { |
2953 | 2 | case GST_QUERY_DURATION: |
2954 | 2 | { |
2955 | 2 | GstFormat format; |
2956 | | |
2957 | | /* upstream in any case */ |
2958 | 2 | if ((res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query))) |
2959 | 0 | break; |
2960 | | |
2961 | 2 | gst_query_parse_duration (query, &format, NULL); |
2962 | | /* try answering TIME by converting from BYTE if subclass allows */ |
2963 | 2 | if (format == GST_FORMAT_TIME && gst_audio_decoder_do_byte (dec)) { |
2964 | 0 | gint64 value; |
2965 | |
2966 | 0 | if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES, |
2967 | 0 | &value)) { |
2968 | 0 | GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value); |
2969 | 0 | if (gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, value, |
2970 | 0 | GST_FORMAT_TIME, &value)) { |
2971 | 0 | gst_query_set_duration (query, GST_FORMAT_TIME, value); |
2972 | 0 | res = TRUE; |
2973 | 0 | } |
2974 | 0 | } |
2975 | 0 | } |
2976 | 2 | break; |
2977 | 2 | } |
2978 | 0 | case GST_QUERY_POSITION: |
2979 | 0 | { |
2980 | 0 | GstFormat format; |
2981 | 0 | gint64 time, value; |
2982 | |
2983 | 0 | if ((res = gst_pad_peer_query (dec->sinkpad, query))) { |
2984 | 0 | GST_LOG_OBJECT (dec, "returning peer response"); |
2985 | 0 | break; |
2986 | 0 | } |
2987 | | |
2988 | | /* Refuse BYTES format queries. If it made sense to |
2989 | | * answer them, upstream would have already */ |
2990 | 0 | gst_query_parse_position (query, &format, NULL); |
2991 | |
2992 | 0 | if (format == GST_FORMAT_BYTES) { |
2993 | 0 | GST_LOG_OBJECT (dec, "Ignoring BYTES position query"); |
2994 | 0 | break; |
2995 | 0 | } |
2996 | | |
2997 | | /* we start from the last seen time */ |
2998 | 0 | time = dec->output_segment.position; |
2999 | | /* correct for the segment values */ |
3000 | 0 | time = |
3001 | 0 | gst_segment_to_stream_time (&dec->output_segment, GST_FORMAT_TIME, |
3002 | 0 | time); |
3003 | |
3004 | 0 | GST_LOG_OBJECT (dec, |
3005 | 0 | "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time)); |
3006 | | |
3007 | | /* and convert to the final format */ |
3008 | 0 | if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time, |
3009 | 0 | format, &value))) |
3010 | 0 | break; |
3011 | | |
3012 | 0 | gst_query_set_position (query, format, value); |
3013 | |
3014 | 0 | GST_LOG_OBJECT (dec, |
3015 | 0 | "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value, |
3016 | 0 | format); |
3017 | 0 | break; |
3018 | 0 | } |
3019 | 0 | case GST_QUERY_FORMATS: |
3020 | 0 | { |
3021 | 0 | gst_query_set_formats (query, 3, |
3022 | 0 | GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT); |
3023 | 0 | res = TRUE; |
3024 | 0 | break; |
3025 | 0 | } |
3026 | 0 | case GST_QUERY_CONVERT: |
3027 | 0 | { |
3028 | 0 | GstFormat src_fmt, dest_fmt; |
3029 | 0 | gint64 src_val, dest_val; |
3030 | |
3031 | 0 | gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); |
3032 | 0 | GST_OBJECT_LOCK (dec); |
3033 | 0 | res = gst_audio_info_convert (&dec->priv->ctx.info, |
3034 | 0 | src_fmt, src_val, dest_fmt, &dest_val); |
3035 | 0 | GST_OBJECT_UNLOCK (dec); |
3036 | 0 | if (!res) |
3037 | 0 | break; |
3038 | 0 | gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); |
3039 | 0 | break; |
3040 | 0 | } |
3041 | 0 | case GST_QUERY_LATENCY: |
3042 | 0 | { |
3043 | 0 | if ((res = gst_pad_peer_query (dec->sinkpad, query))) { |
3044 | 0 | gboolean live; |
3045 | 0 | GstClockTime min_latency, max_latency; |
3046 | |
3047 | 0 | gst_query_parse_latency (query, &live, &min_latency, &max_latency); |
3048 | 0 | GST_DEBUG_OBJECT (dec, "Peer latency: live %d, min %" |
3049 | 0 | GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live, |
3050 | 0 | GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency)); |
3051 | |
3052 | 0 | GST_OBJECT_LOCK (dec); |
3053 | | /* add our latency */ |
3054 | 0 | min_latency += dec->priv->ctx.min_latency; |
3055 | 0 | if (max_latency == -1 || dec->priv->ctx.max_latency == -1) |
3056 | 0 | max_latency = -1; |
3057 | 0 | else |
3058 | 0 | max_latency += dec->priv->ctx.max_latency; |
3059 | 0 | GST_OBJECT_UNLOCK (dec); |
3060 | |
3061 | 0 | gst_query_set_latency (query, live, min_latency, max_latency); |
3062 | 0 | } |
3063 | 0 | break; |
3064 | 0 | } |
3065 | 176 | default: |
3066 | 176 | res = gst_pad_query_default (pad, GST_OBJECT_CAST (dec), query); |
3067 | 176 | break; |
3068 | 178 | } |
3069 | | |
3070 | 178 | return res; |
3071 | 178 | } |
3072 | | |
3073 | | static gboolean |
3074 | | gst_audio_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query) |
3075 | 178 | { |
3076 | 178 | GstAudioDecoder *dec; |
3077 | 178 | GstAudioDecoderClass *dec_class; |
3078 | 178 | gboolean ret = FALSE; |
3079 | | |
3080 | 178 | dec = GST_AUDIO_DECODER (parent); |
3081 | 178 | dec_class = GST_AUDIO_DECODER_GET_CLASS (dec); |
3082 | | |
3083 | 178 | GST_DEBUG_OBJECT (pad, "received query %" GST_PTR_FORMAT, query); |
3084 | | |
3085 | 178 | if (dec_class->src_query) |
3086 | 178 | ret = dec_class->src_query (dec, query); |
3087 | | |
3088 | 178 | return ret; |
3089 | 178 | } |
3090 | | |
3091 | | static gboolean |
3092 | | gst_audio_decoder_stop (GstAudioDecoder * dec) |
3093 | 86 | { |
3094 | 86 | GstAudioDecoderClass *klass; |
3095 | 86 | gboolean ret = TRUE; |
3096 | | |
3097 | 86 | GST_DEBUG_OBJECT (dec, "gst_audio_decoder_stop"); |
3098 | | |
3099 | 86 | klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
3100 | | |
3101 | 86 | if (klass->stop) { |
3102 | 86 | ret = klass->stop (dec); |
3103 | 86 | } |
3104 | | |
3105 | | /* clean up */ |
3106 | 86 | gst_audio_decoder_reset (dec, TRUE); |
3107 | | |
3108 | 86 | if (ret) |
3109 | 86 | dec->priv->active = FALSE; |
3110 | | |
3111 | 86 | return ret; |
3112 | 86 | } |
3113 | | |
3114 | | static gboolean |
3115 | | gst_audio_decoder_start (GstAudioDecoder * dec) |
3116 | 86 | { |
3117 | 86 | GstAudioDecoderClass *klass; |
3118 | 86 | gboolean ret = TRUE; |
3119 | | |
3120 | 86 | GST_DEBUG_OBJECT (dec, "gst_audio_decoder_start"); |
3121 | | |
3122 | 86 | klass = GST_AUDIO_DECODER_GET_CLASS (dec); |
3123 | | |
3124 | | /* arrange clean state */ |
3125 | 86 | gst_audio_decoder_reset (dec, TRUE); |
3126 | | |
3127 | 86 | if (klass->start) { |
3128 | 86 | ret = klass->start (dec); |
3129 | 86 | } |
3130 | | |
3131 | 86 | if (ret) |
3132 | 86 | dec->priv->active = TRUE; |
3133 | | |
3134 | 86 | return ret; |
3135 | 86 | } |
3136 | | |
3137 | | static void |
3138 | | gst_audio_decoder_get_property (GObject * object, guint prop_id, |
3139 | | GValue * value, GParamSpec * pspec) |
3140 | 0 | { |
3141 | 0 | GstAudioDecoder *dec; |
3142 | |
3143 | 0 | dec = GST_AUDIO_DECODER (object); |
3144 | |
3145 | 0 | switch (prop_id) { |
3146 | 0 | case PROP_LATENCY: |
3147 | 0 | g_value_set_int64 (value, dec->priv->latency); |
3148 | 0 | break; |
3149 | 0 | case PROP_TOLERANCE: |
3150 | 0 | g_value_set_int64 (value, dec->priv->tolerance); |
3151 | 0 | break; |
3152 | 0 | case PROP_PLC: |
3153 | 0 | g_value_set_boolean (value, dec->priv->plc); |
3154 | 0 | break; |
3155 | 0 | case PROP_MAX_ERRORS: |
3156 | 0 | g_value_set_int (value, gst_audio_decoder_get_max_errors (dec)); |
3157 | 0 | break; |
3158 | 0 | default: |
3159 | 0 | G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); |
3160 | 0 | break; |
3161 | 0 | } |
3162 | 0 | } |
3163 | | |
3164 | | static void |
3165 | | gst_audio_decoder_set_property (GObject * object, guint prop_id, |
3166 | | const GValue * value, GParamSpec * pspec) |
3167 | 0 | { |
3168 | 0 | GstAudioDecoder *dec; |
3169 | |
3170 | 0 | dec = GST_AUDIO_DECODER (object); |
3171 | |
3172 | 0 | switch (prop_id) { |
3173 | 0 | case PROP_LATENCY: |
3174 | 0 | dec->priv->latency = g_value_get_int64 (value); |
3175 | 0 | break; |
3176 | 0 | case PROP_TOLERANCE: |
3177 | 0 | dec->priv->tolerance = g_value_get_int64 (value); |
3178 | 0 | break; |
3179 | 0 | case PROP_PLC: |
3180 | 0 | dec->priv->plc = g_value_get_boolean (value); |
3181 | 0 | break; |
3182 | 0 | case PROP_MAX_ERRORS: |
3183 | 0 | gst_audio_decoder_set_max_errors (dec, g_value_get_int (value)); |
3184 | 0 | break; |
3185 | 0 | default: |
3186 | 0 | G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); |
3187 | 0 | break; |
3188 | 0 | } |
3189 | 0 | } |
3190 | | |
3191 | | static GstStateChangeReturn |
3192 | | gst_audio_decoder_change_state (GstElement * element, GstStateChange transition) |
3193 | 415 | { |
3194 | 415 | GstAudioDecoder *codec; |
3195 | 415 | GstAudioDecoderClass *klass; |
3196 | 415 | GstStateChangeReturn ret; |
3197 | | |
3198 | 415 | codec = GST_AUDIO_DECODER (element); |
3199 | 415 | klass = GST_AUDIO_DECODER_GET_CLASS (codec); |
3200 | | |
3201 | 415 | switch (transition) { |
3202 | 86 | case GST_STATE_CHANGE_NULL_TO_READY: |
3203 | 86 | if (klass->open) { |
3204 | 0 | if (!klass->open (codec)) |
3205 | 0 | goto open_failed; |
3206 | 0 | } |
3207 | 86 | break; |
3208 | 86 | case GST_STATE_CHANGE_READY_TO_PAUSED: |
3209 | 86 | if (!gst_audio_decoder_start (codec)) { |
3210 | 0 | goto start_failed; |
3211 | 0 | } |
3212 | 86 | break; |
3213 | 86 | case GST_STATE_CHANGE_PAUSED_TO_PLAYING: |
3214 | 0 | break; |
3215 | 243 | default: |
3216 | 243 | break; |
3217 | 415 | } |
3218 | | |
3219 | 415 | ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition); |
3220 | | |
3221 | 415 | switch (transition) { |
3222 | 0 | case GST_STATE_CHANGE_PLAYING_TO_PAUSED: |
3223 | 0 | break; |
3224 | 86 | case GST_STATE_CHANGE_PAUSED_TO_READY: |
3225 | 86 | if (!gst_audio_decoder_stop (codec)) { |
3226 | 0 | goto stop_failed; |
3227 | 0 | } |
3228 | 86 | break; |
3229 | 86 | case GST_STATE_CHANGE_READY_TO_NULL: |
3230 | 86 | if (klass->close) { |
3231 | 0 | if (!klass->close (codec)) |
3232 | 0 | goto close_failed; |
3233 | 0 | } |
3234 | 86 | break; |
3235 | 243 | default: |
3236 | 243 | break; |
3237 | 415 | } |
3238 | | |
3239 | 415 | return ret; |
3240 | | |
3241 | 0 | start_failed: |
3242 | 0 | { |
3243 | 0 | GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to start codec")); |
3244 | 0 | return GST_STATE_CHANGE_FAILURE; |
3245 | 415 | } |
3246 | 0 | stop_failed: |
3247 | 0 | { |
3248 | 0 | GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to stop codec")); |
3249 | 0 | return GST_STATE_CHANGE_FAILURE; |
3250 | 415 | } |
3251 | 0 | open_failed: |
3252 | 0 | { |
3253 | 0 | GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to open codec")); |
3254 | 0 | return GST_STATE_CHANGE_FAILURE; |
3255 | 415 | } |
3256 | 0 | close_failed: |
3257 | 0 | { |
3258 | 0 | GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to close codec")); |
3259 | 0 | return GST_STATE_CHANGE_FAILURE; |
3260 | 415 | } |
3261 | 415 | } |
3262 | | |
3263 | | GstFlowReturn |
3264 | | _gst_audio_decoder_error (GstAudioDecoder * dec, gint weight, |
3265 | | GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file, |
3266 | | const gchar * function, gint line) |
3267 | 0 | { |
3268 | 0 | if (txt) |
3269 | 0 | GST_WARNING_OBJECT (dec, "error: %s", txt); |
3270 | 0 | if (dbg) |
3271 | 0 | GST_WARNING_OBJECT (dec, "error: %s", dbg); |
3272 | 0 | dec->priv->error_count += weight; |
3273 | 0 | dec->priv->discont = TRUE; |
3274 | 0 | if (dec->priv->max_errors >= 0 |
3275 | 0 | && dec->priv->max_errors < dec->priv->error_count) { |
3276 | 0 | gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR, domain, |
3277 | 0 | code, txt, dbg, file, function, line); |
3278 | 0 | return GST_FLOW_ERROR; |
3279 | 0 | } else { |
3280 | 0 | g_free (txt); |
3281 | 0 | g_free (dbg); |
3282 | 0 | return GST_FLOW_OK; |
3283 | 0 | } |
3284 | 0 | } |
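Subclasses do not call this helper directly; it is reached through the GST_AUDIO_DECODER_ERROR macro, typically from a handle_frame implementation. A sketch of that pattern follows; my_codec_decode and gst_my_dec_handle_frame are hypothetical names.

/* Sketch: decode failures are counted against max-errors and only become
 * fatal once the threshold is exceeded. */
static GstFlowReturn
gst_my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
{
  GstFlowReturn ret = GST_FLOW_OK;

  if (buffer == NULL)
    return GST_FLOW_OK;         /* draining: nothing buffered in this sketch */

  if (!my_codec_decode (buffer)) {
    /* weight 1: one failed frame counts once */
    GST_AUDIO_DECODER_ERROR (dec, 1, STREAM, DECODE, (NULL),
        ("failed to decode frame"), ret);
    return ret;
  }

  /* ... otherwise push decoded samples via gst_audio_decoder_finish_frame () */
  return ret;
}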
3285 | | |
3286 | | /** |
3287 | | * gst_audio_decoder_get_audio_info: |
3288 | | * @dec: a #GstAudioDecoder |
3289 | | * |
3290 | | * Returns: (transfer none): a #GstAudioInfo describing the input audio format |
3291 | | */ |
3292 | | GstAudioInfo * |
3293 | | gst_audio_decoder_get_audio_info (GstAudioDecoder * dec) |
3294 | 0 | { |
3295 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), NULL); |
3296 | | |
3297 | 0 | return &dec->priv->ctx.info; |
3298 | 0 | } |
3299 | | |
3300 | | /** |
3301 | | * gst_audio_decoder_set_plc_aware: |
3302 | | * @dec: a #GstAudioDecoder |
3303 | | * @plc: new plc state |
3304 | | * |
3305 | | * Indicates whether or not subclass handles packet loss concealment (plc). |
3306 | | */ |
3307 | | void |
3308 | | gst_audio_decoder_set_plc_aware (GstAudioDecoder * dec, gboolean plc) |
3309 | 0 | { |
3310 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3311 | | |
3312 | 0 | dec->priv->ctx.do_plc = plc; |
3313 | 0 | } |
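A PLC-capable subclass would typically declare this from its start vfunc; the names below are hypothetical.

/* Sketch: advertise PLC support so the base class may request concealment. */
static gboolean
gst_my_dec_start (GstAudioDecoder * dec)
{
  gst_audio_decoder_set_plc_aware (dec, TRUE);
  return TRUE;
}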
3314 | | |
3315 | | /** |
3316 | | * gst_audio_decoder_get_plc_aware: |
3317 | | * @dec: a #GstAudioDecoder |
3318 | | * |
3319 | | * Returns: currently configured plc handling |
3320 | | */ |
3321 | | gint |
3322 | | gst_audio_decoder_get_plc_aware (GstAudioDecoder * dec) |
3323 | 0 | { |
3324 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); |
3325 | | |
3326 | 0 | return dec->priv->ctx.do_plc; |
3327 | 0 | } |
3328 | | |
3329 | | /** |
3330 | | * gst_audio_decoder_set_estimate_rate: |
3331 | | * @dec: a #GstAudioDecoder |
3332 | | * @enabled: whether to enable byte to time conversion |
3333 | | * |
3334 | | * Allows the base class to perform estimated byte-to-time conversion. |
3335 | | */ |
3336 | | void |
3337 | | gst_audio_decoder_set_estimate_rate (GstAudioDecoder * dec, gboolean enabled) |
3338 | 0 | { |
3339 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3340 | | |
3341 | 0 | dec->priv->ctx.do_estimate_rate = enabled; |
3342 | 0 | } |
3343 | | |
3344 | | /** |
3345 | | * gst_audio_decoder_get_estimate_rate: |
3346 | | * @dec: a #GstAudioDecoder |
3347 | | * |
3348 | | * Returns: currently configured byte to time conversion setting |
3349 | | */ |
3350 | | gint |
3351 | | gst_audio_decoder_get_estimate_rate (GstAudioDecoder * dec) |
3352 | 0 | { |
3353 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); |
3354 | | |
3355 | 0 | return dec->priv->ctx.do_estimate_rate; |
3356 | 0 | } |
3357 | | |
3358 | | /** |
3359 | | * gst_audio_decoder_get_delay: |
3360 | | * @dec: a #GstAudioDecoder |
3361 | | * |
3362 | | * Returns: currently configured decoder delay |
3363 | | */ |
3364 | | gint |
3365 | | gst_audio_decoder_get_delay (GstAudioDecoder * dec) |
3366 | 0 | { |
3367 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); |
3368 | | |
3369 | 0 | return dec->priv->ctx.delay; |
3370 | 0 | } |
3371 | | |
3372 | | /** |
3373 | | * gst_audio_decoder_set_max_errors: |
3374 | | * @dec: a #GstAudioDecoder |
3375 | | * @num: max tolerated errors |
3376 | | * |
3377 | | * Sets the number of tolerated decoder errors: a tolerated error is only |
3378 | | * warned about, but exceeding the count leads to a fatal error. You can set |
3379 | | * -1 to never return fatal errors. The default is |
3380 | | * GST_AUDIO_DECODER_MAX_ERRORS. |
3381 | | */ |
3382 | | void |
3383 | | gst_audio_decoder_set_max_errors (GstAudioDecoder * dec, gint num) |
3384 | 0 | { |
3385 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3386 | | |
3387 | 0 | dec->priv->max_errors = num; |
3388 | 0 | } |
3389 | | |
3390 | | /** |
3391 | | * gst_audio_decoder_get_max_errors: |
3392 | | * @dec: a #GstAudioDecoder |
3393 | | * |
3394 | | * Returns: currently configured decoder tolerated error count. |
3395 | | */ |
3396 | | gint |
3397 | | gst_audio_decoder_get_max_errors (GstAudioDecoder * dec) |
3398 | 0 | { |
3399 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); |
3400 | | |
3401 | 0 | return dec->priv->max_errors; |
3402 | 0 | } |
3403 | | |
3404 | | /** |
3405 | | * gst_audio_decoder_set_latency: |
3406 | | * @dec: a #GstAudioDecoder |
3407 | | * @min: minimum latency |
3408 | | * @max: maximum latency |
3409 | | * |
3410 | | * Sets decoder latency. If the provided values changed from |
3411 | | * previously provided ones, this will also post a LATENCY message on the bus |
3412 | | * so the pipeline can reconfigure its global latency. |
3413 | | */ |
3414 | | void |
3415 | | gst_audio_decoder_set_latency (GstAudioDecoder * dec, |
3416 | | GstClockTime min, GstClockTime max) |
3417 | 0 | { |
3418 | 0 | gboolean post_message = FALSE; |
3419 | |
3420 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3421 | 0 | g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min)); |
3422 | 0 | g_return_if_fail (min <= max); |
3423 | | |
3424 | 0 | GST_DEBUG_OBJECT (dec, |
3425 | 0 | "min_latency:%" GST_TIME_FORMAT " max_latency:%" GST_TIME_FORMAT, |
3426 | 0 | GST_TIME_ARGS (min), GST_TIME_ARGS (max)); |
3427 | |
3428 | 0 | GST_OBJECT_LOCK (dec); |
3429 | 0 | if (dec->priv->ctx.min_latency != min) { |
3430 | 0 | dec->priv->ctx.min_latency = min; |
3431 | 0 | post_message = TRUE; |
3432 | 0 | } |
3433 | 0 | if (dec->priv->ctx.max_latency != max) { |
3434 | 0 | dec->priv->ctx.max_latency = max; |
3435 | 0 | post_message = TRUE; |
3436 | 0 | } |
3437 | 0 | if (!dec->priv->ctx.posted_latency_msg) { |
3438 | 0 | dec->priv->ctx.posted_latency_msg = TRUE; |
3439 | 0 | post_message = TRUE; |
3440 | 0 | } |
3441 | 0 | GST_OBJECT_UNLOCK (dec); |
3442 | | |
3443 | | /* post latency message on the bus */ |
3444 | 0 | if (post_message) |
3445 | 0 | gst_element_post_message (GST_ELEMENT (dec), |
3446 | 0 | gst_message_new_latency (GST_OBJECT (dec))); |
3447 | 0 | } |
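For example, a subclass whose codec has a fixed algorithmic delay might report it once the input format is known; the 20 ms figure and the names below are purely illustrative.

/* Sketch: report a fixed codec delay as latency from set_format. */
static gboolean
gst_my_dec_set_format (GstAudioDecoder * dec, GstCaps * caps)
{
  /* ... configure the codec from caps ... */

  gst_audio_decoder_set_latency (dec, 20 * GST_MSECOND, 20 * GST_MSECOND);
  return TRUE;
}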
3448 | | |
3449 | | /** |
3450 | | * gst_audio_decoder_get_latency: |
3451 | | * @dec: a #GstAudioDecoder |
3452 | | * @min: (out) (optional): a pointer to storage to hold minimum latency |
3453 | | * @max: (out) (optional): a pointer to storage to hold maximum latency |
3454 | | * |
3455 | | * Sets the variables pointed to by @min and @max to the currently configured |
3456 | | * latency. |
3457 | | */ |
3458 | | void |
3459 | | gst_audio_decoder_get_latency (GstAudioDecoder * dec, |
3460 | | GstClockTime * min, GstClockTime * max) |
3461 | 0 | { |
3462 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3463 | | |
3464 | 0 | GST_OBJECT_LOCK (dec); |
3465 | 0 | if (min) |
3466 | 0 | *min = dec->priv->ctx.min_latency; |
3467 | 0 | if (max) |
3468 | 0 | *max = dec->priv->ctx.max_latency; |
3469 | 0 | GST_OBJECT_UNLOCK (dec); |
3470 | 0 | } |
3471 | | |
3472 | | /** |
3473 | | * gst_audio_decoder_get_parse_state: |
3474 | | * @dec: a #GstAudioDecoder |
3475 | | * @sync: (out) (optional): a pointer to a variable to hold the current sync state |
3476 | | * @eos: (out) (optional): a pointer to a variable to hold the current eos state |
3477 | | * |
3478 | | * Return current parsing (sync and eos) state. |
3479 | | */ |
3480 | | void |
3481 | | gst_audio_decoder_get_parse_state (GstAudioDecoder * dec, |
3482 | | gboolean * sync, gboolean * eos) |
3483 | 0 | { |
3484 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3485 | | |
3486 | 0 | if (sync) |
3487 | 0 | *sync = dec->priv->ctx.sync; |
3488 | 0 | if (eos) |
3489 | 0 | *eos = dec->priv->ctx.eos; |
3490 | 0 | } |
3491 | | |
3492 | | /** |
3493 | | * gst_audio_decoder_set_allocation_caps: |
3494 | | * @dec: a #GstAudioDecoder |
3495 | | * @allocation_caps: (nullable): a #GstCaps or %NULL |
3496 | | * |
3497 | | * Sets the caps to use in the allocation query instead of the caps |
3498 | | * currently set on the pad. Use this function before calling |
3499 | | * gst_audio_decoder_negotiate(). If set to %NULL, the allocation |
3500 | | * query will use the caps from the pad. |
3501 | | * |
3502 | | * Since: 1.10 |
3503 | | */ |
3504 | | void |
3505 | | gst_audio_decoder_set_allocation_caps (GstAudioDecoder * dec, |
3506 | | GstCaps * allocation_caps) |
3507 | 0 | { |
3508 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3509 | | |
3510 | 0 | gst_caps_replace (&dec->priv->ctx.allocation_caps, allocation_caps); |
3511 | 0 | } |
3512 | | |
3513 | | /** |
3514 | | * gst_audio_decoder_set_plc: |
3515 | | * @dec: a #GstAudioDecoder |
3516 | | * @enabled: new state |
3517 | | * |
3518 | | * Enable or disable decoder packet loss concealment, provided subclass |
3519 | | * and codec are capable and allow handling plc. |
3520 | | * |
3521 | | * MT safe. |
3522 | | */ |
3523 | | void |
3524 | | gst_audio_decoder_set_plc (GstAudioDecoder * dec, gboolean enabled) |
3525 | 0 | { |
3526 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3527 | | |
3528 | 0 | GST_LOG_OBJECT (dec, "enabled: %d", enabled); |
3529 | |
3530 | 0 | GST_OBJECT_LOCK (dec); |
3531 | 0 | dec->priv->plc = enabled; |
3532 | 0 | GST_OBJECT_UNLOCK (dec); |
3533 | 0 | } |
3534 | | |
3535 | | /** |
3536 | | * gst_audio_decoder_get_plc: |
3537 | | * @dec: a #GstAudioDecoder |
3538 | | * |
3539 | | * Queries decoder packet loss concealment handling. |
3540 | | * |
3541 | | * Returns: TRUE if packet loss concealment is enabled. |
3542 | | * |
3543 | | * MT safe. |
3544 | | */ |
3545 | | gboolean |
3546 | | gst_audio_decoder_get_plc (GstAudioDecoder * dec) |
3547 | 0 | { |
3548 | 0 | gboolean result; |
3549 | |
3550 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE); |
3551 | | |
3552 | 0 | GST_OBJECT_LOCK (dec); |
3553 | 0 | result = dec->priv->plc; |
3554 | 0 | GST_OBJECT_UNLOCK (dec); |
3555 | |
3556 | 0 | return result; |
3557 | 0 | } |
3558 | | |
3559 | | /** |
3560 | | * gst_audio_decoder_set_min_latency: |
3561 | | * @dec: a #GstAudioDecoder |
3562 | | * @num: new minimum latency |
3563 | | * |
3564 | | * Sets decoder minimum aggregation latency. |
3565 | | * |
3566 | | * MT safe. |
3567 | | */ |
3568 | | void |
3569 | | gst_audio_decoder_set_min_latency (GstAudioDecoder * dec, GstClockTime num) |
3570 | 0 | { |
3571 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3572 | 0 | g_return_if_fail (GST_CLOCK_TIME_IS_VALID (num)); |
3573 | | |
3574 | 0 | GST_OBJECT_LOCK (dec); |
3575 | 0 | dec->priv->latency = num; |
3576 | 0 | GST_OBJECT_UNLOCK (dec); |
3577 | 0 | } |
3578 | | |
3579 | | /** |
3580 | | * gst_audio_decoder_get_min_latency: |
3581 | | * @dec: a #GstAudioDecoder |
3582 | | * |
3583 | | * Queries decoder's latency aggregation. |
3584 | | * |
3585 | | * Returns: aggregation latency. |
3586 | | * |
3587 | | * MT safe. |
3588 | | */ |
3589 | | GstClockTime |
3590 | | gst_audio_decoder_get_min_latency (GstAudioDecoder * dec) |
3591 | 0 | { |
3592 | 0 | GstClockTime result; |
3593 | |
3594 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE); |
3595 | | |
3596 | 0 | GST_OBJECT_LOCK (dec); |
3597 | 0 | result = dec->priv->latency; |
3598 | 0 | GST_OBJECT_UNLOCK (dec); |
3599 | |
3600 | 0 | return result; |
3601 | 0 | } |
3602 | | |
3603 | | /** |
3604 | | * gst_audio_decoder_set_tolerance: |
3605 | | * @dec: a #GstAudioDecoder |
3606 | | * @tolerance: new tolerance |
3607 | | * |
3608 | | * Configures decoder audio jitter tolerance threshold. |
3609 | | * |
3610 | | * MT safe. |
3611 | | */ |
3612 | | void |
3613 | | gst_audio_decoder_set_tolerance (GstAudioDecoder * dec, GstClockTime tolerance) |
3614 | 0 | { |
3615 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3616 | 0 | g_return_if_fail (GST_CLOCK_TIME_IS_VALID (tolerance)); |
3617 | | |
3618 | 0 | GST_OBJECT_LOCK (dec); |
3619 | 0 | dec->priv->tolerance = tolerance; |
3620 | 0 | GST_OBJECT_UNLOCK (dec); |
3621 | 0 | } |
3622 | | |
3623 | | /** |
3624 | | * gst_audio_decoder_get_tolerance: |
3625 | | * @dec: a #GstAudioDecoder |
3626 | | * |
3627 | | * Queries current audio jitter tolerance threshold. |
3628 | | * |
3629 | | * Returns: decoder audio jitter tolerance threshold. |
3630 | | * |
3631 | | * MT safe. |
3632 | | */ |
3633 | | GstClockTime |
3634 | | gst_audio_decoder_get_tolerance (GstAudioDecoder * dec) |
3635 | 0 | { |
3636 | 0 | GstClockTime result; |
3637 | |
|
3638 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); |
3639 | | |
3640 | 0 | GST_OBJECT_LOCK (dec); |
3641 | 0 | result = dec->priv->tolerance; |
3642 | 0 | GST_OBJECT_UNLOCK (dec); |
3643 | |
|
3644 | 0 | return result; |
3645 | 0 | } |
3646 | | |
3647 | | /** |
3648 | | * gst_audio_decoder_set_drainable: |
3649 | | * @dec: a #GstAudioDecoder |
3650 | | * @enabled: new state |
3651 | | * |
3652 | | * Configures decoder drain handling. If drainable, subclass might |
3653 | | * be handed a NULL buffer to have it return any leftover decoded data. |
3654 | | * Otherwise, the subclass is assumed not to support draining and will only |
3655 | | * ever be passed real data. |
3656 | | * |
3657 | | * MT safe. |
3658 | | */ |
3659 | | void |
3660 | | gst_audio_decoder_set_drainable (GstAudioDecoder * dec, gboolean enabled) |
3661 | 0 | { |
3662 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3663 | | |
3664 | 0 | GST_OBJECT_LOCK (dec); |
3665 | 0 | dec->priv->drainable = enabled; |
3666 | 0 | GST_OBJECT_UNLOCK (dec); |
3667 | 0 | } |
3668 | | |
3669 | | /** |
3670 | | * gst_audio_decoder_get_drainable: |
3671 | | * @dec: a #GstAudioDecoder |
3672 | | * |
3673 | | * Queries decoder drain handling. |
3674 | | * |
3675 | | * Returns: TRUE if drainable handling is enabled. |
3676 | | * |
3677 | | * MT safe. |
3678 | | */ |
3679 | | gboolean |
3680 | | gst_audio_decoder_get_drainable (GstAudioDecoder * dec) |
3681 | 0 | { |
3682 | 0 | gboolean result; |
3683 | |
3684 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); |
3685 | | |
3686 | 0 | GST_OBJECT_LOCK (dec); |
3687 | 0 | result = dec->priv->drainable; |
3688 | 0 | GST_OBJECT_UNLOCK (dec); |
3689 | |
3690 | 0 | return result; |
3691 | 0 | } |
3692 | | |
3693 | | /** |
3694 | | * gst_audio_decoder_set_needs_format: |
3695 | | * @dec: a #GstAudioDecoder |
3696 | | * @enabled: new state |
3697 | | * |
3698 | | * Configures decoder format needs. If enabled, subclass needs to be |
3699 | | * negotiated with format caps before it can process any data. It will then |
3700 | | * never be handed any data before it has been configured. |
3701 | | * Otherwise, it might be handed data without having been configured and |
3702 | | * is then expected to be able to do so, either by default |
3703 | | * or based on the input data. |
3704 | | * |
3705 | | * MT safe. |
3706 | | */ |
3707 | | void |
3708 | | gst_audio_decoder_set_needs_format (GstAudioDecoder * dec, gboolean enabled) |
3709 | 0 | { |
3710 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3711 | | |
3712 | 0 | GST_OBJECT_LOCK (dec); |
3713 | 0 | dec->priv->needs_format = enabled; |
3714 | 0 | GST_OBJECT_UNLOCK (dec); |
3715 | 0 | } |
3716 | | |
3717 | | /** |
3718 | | * gst_audio_decoder_get_needs_format: |
3719 | | * @dec: a #GstAudioDecoder |
3720 | | * |
3721 | | * Queries decoder required format handling. |
3722 | | * |
3723 | | * Returns: TRUE if required format handling is enabled. |
3724 | | * |
3725 | | * MT safe. |
3726 | | */ |
3727 | | gboolean |
3728 | | gst_audio_decoder_get_needs_format (GstAudioDecoder * dec) |
3729 | 0 | { |
3730 | 0 | gboolean result; |
3731 | |
3732 | 0 | g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE); |
3733 | | |
3734 | 0 | GST_OBJECT_LOCK (dec); |
3735 | 0 | result = dec->priv->needs_format; |
3736 | 0 | GST_OBJECT_UNLOCK (dec); |
3737 | |
3738 | 0 | return result; |
3739 | 0 | } |
3740 | | |
3741 | | /** |
3742 | | * gst_audio_decoder_merge_tags: |
3743 | | * @dec: a #GstAudioDecoder |
3744 | | * @tags: (nullable): a #GstTagList to merge, or NULL |
3745 | | * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE |
3746 | | * |
3747 | | * Sets the audio decoder tags and how they should be merged with any |
3748 | | * upstream stream tags. This will override any tags previously set |
3749 | | * with gst_audio_decoder_merge_tags(). |
3750 | | * |
3751 | | * Note that this is provided for convenience, and the subclass is |
3752 | | * not required to use this and can still do tag handling on its own. |
3753 | | */ |
3754 | | void |
3755 | | gst_audio_decoder_merge_tags (GstAudioDecoder * dec, |
3756 | | const GstTagList * tags, GstTagMergeMode mode) |
3757 | 73 | { |
3758 | 73 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3759 | 73 | g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags)); |
3760 | 73 | g_return_if_fail (mode != GST_TAG_MERGE_UNDEFINED); |
3761 | | |
3762 | 73 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
3763 | 73 | if (dec->priv->taglist != tags) { |
3764 | 73 | if (dec->priv->taglist) { |
3765 | 1 | gst_tag_list_unref (dec->priv->taglist); |
3766 | 1 | dec->priv->taglist = NULL; |
3767 | 1 | dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL; |
3768 | 1 | } |
3769 | 73 | if (tags) { |
3770 | 73 | dec->priv->taglist = gst_tag_list_ref ((GstTagList *) tags); |
3771 | 73 | dec->priv->decoder_tags_merge_mode = mode; |
3772 | 73 | } |
3773 | | |
3774 | 73 | GST_DEBUG_OBJECT (dec, "setting decoder tags to %" GST_PTR_FORMAT, tags); |
3775 | 73 | dec->priv->taglist_changed = TRUE; |
3776 | 73 | } |
3777 | 73 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
3778 | 73 | } |
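As a usage sketch, a subclass that learns metadata from the bitstream (say, a bitrate) could publish it as follows; the tag value is illustrative.

/* Sketch: publish a decoder tag; GST_TAG_MERGE_REPLACE overrides tags
 * previously set with this function. */
GstTagList *tags;

tags = gst_tag_list_new (GST_TAG_BITRATE, (guint) 128000, NULL);
gst_audio_decoder_merge_tags (dec, tags, GST_TAG_MERGE_REPLACE);
gst_tag_list_unref (tags);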
3779 | | |
3780 | | /** |
3781 | | * gst_audio_decoder_allocate_output_buffer: |
3782 | | * @dec: a #GstAudioDecoder |
3783 | | * @size: size of the buffer |
3784 | | * |
3785 | | * Helper function that allocates a buffer to hold an audio frame |
3786 | | * for @dec's current output format. |
3787 | | * |
3788 | | * Returns: (transfer full): allocated buffer |
3789 | | */ |
3790 | | GstBuffer * |
3791 | | gst_audio_decoder_allocate_output_buffer (GstAudioDecoder * dec, gsize size) |
3792 | 68 | { |
3793 | 68 | GstBuffer *buffer = NULL; |
3794 | 68 | gboolean needs_reconfigure = FALSE; |
3795 | | |
3796 | 68 | g_return_val_if_fail (size > 0, NULL); |
3797 | | |
3798 | 68 | GST_DEBUG ("alloc src buffer"); |
3799 | | |
3800 | 68 | GST_AUDIO_DECODER_STREAM_LOCK (dec); |
3801 | | |
3802 | 68 | needs_reconfigure = gst_pad_check_reconfigure (dec->srcpad); |
3803 | 68 | if (G_UNLIKELY (dec->priv->ctx.output_format_changed || |
3804 | 68 | (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info) |
3805 | 68 | && needs_reconfigure))) { |
3806 | 68 | if (!gst_audio_decoder_negotiate_unlocked (dec)) { |
3807 | 0 | GST_INFO_OBJECT (dec, "Failed to negotiate, fallback allocation"); |
3808 | 0 | gst_pad_mark_reconfigure (dec->srcpad); |
3809 | 0 | goto fallback; |
3810 | 0 | } |
3811 | 68 | } |
3812 | | |
3813 | 68 | buffer = |
3814 | 68 | gst_buffer_new_allocate (dec->priv->ctx.allocator, size, |
3815 | 68 | &dec->priv->ctx.params); |
3816 | 68 | if (!buffer) { |
3817 | 0 | GST_INFO_OBJECT (dec, "couldn't allocate output buffer"); |
3818 | 0 | goto fallback; |
3819 | 0 | } |
3820 | | |
3821 | 68 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
3822 | | |
3823 | 68 | return buffer; |
3824 | 0 | fallback: |
3825 | 0 | buffer = gst_buffer_new_allocate (NULL, size, NULL); |
3826 | 0 | GST_AUDIO_DECODER_STREAM_UNLOCK (dec); |
3827 | |
3828 | 0 | return buffer; |
3829 | 68 | } |
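Inside a subclass handle_frame the usual pattern is to allocate the output buffer with this helper, fill it, and hand it to gst_audio_decoder_finish_frame(); decoded_size and the fill step below are placeholders.

/* Sketch: produce one decoded frame's worth of output. */
GstBuffer *outbuf;
GstMapInfo map;
GstFlowReturn ret;

outbuf = gst_audio_decoder_allocate_output_buffer (dec, decoded_size);
gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
/* ... write decoded_size bytes of raw audio into map.data ... */
gst_buffer_unmap (outbuf, &map);

ret = gst_audio_decoder_finish_frame (dec, outbuf, 1);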
3830 | | |
3831 | | /** |
3832 | | * gst_audio_decoder_get_allocator: |
3833 | | * @dec: a #GstAudioDecoder |
3834 | | * @allocator: (out) (optional) (nullable) (transfer full): the #GstAllocator |
3835 | | * used |
3836 | | * @params: (out) (optional) (transfer full): the |
3837 | | * #GstAllocationParams of @allocator |
3838 | | * |
3839 | | * Lets #GstAudioDecoder sub-classes know the memory @allocator |
3840 | | * used by the base class and its @params. |
3841 | | * |
3842 | | * Unref the @allocator after use. |
3843 | | */ |
3844 | | void |
3845 | | gst_audio_decoder_get_allocator (GstAudioDecoder * dec, |
3846 | | GstAllocator ** allocator, GstAllocationParams * params) |
3847 | 0 | { |
3848 | 0 | g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); |
3849 | | |
3850 | 0 | if (allocator) |
3851 | 0 | *allocator = dec->priv->ctx.allocator ? |
3852 | 0 | gst_object_ref (dec->priv->ctx.allocator) : NULL; |
3853 | |
3854 | 0 | if (params) |
3855 | 0 | *params = dec->priv->ctx.params; |
3856 | 0 | } |
3857 | | |
3858 | | /** |
3859 | | * gst_audio_decoder_set_use_default_pad_acceptcaps: |
3860 | | * @decoder: a #GstAudioDecoder |
3861 | | * @use: if the default pad accept-caps query handling should be used |
3862 | | * |
3863 | | * Lets #GstAudioDecoder sub-classes decide if they want the sink pad |
3864 | | * to use the default pad query handler to reply to accept-caps queries. |
3865 | | * |
3866 | | * By setting this to true it is possible to further customize the default |
3867 | | * handler with %GST_PAD_SET_ACCEPT_INTERSECT and |
3868 | | * %GST_PAD_SET_ACCEPT_TEMPLATE. |
3869 | | * |
3870 | | * Since: 1.6 |
3871 | | */ |
3872 | | void |
3873 | | gst_audio_decoder_set_use_default_pad_acceptcaps (GstAudioDecoder * decoder, |
3874 | | gboolean use) |
3875 | 86 | { |
3876 | 86 | decoder->priv->use_default_pad_acceptcaps = use; |
3877 | 86 | } |
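A subclass opting into the default accept-caps handling would typically do so from its instance init, combining it with the pad flags mentioned above; GstMyDec and gst_my_dec_init are hypothetical names.

/* Sketch: enable default accept-caps handling plus the flags that tune it. */
static void
gst_my_dec_init (GstMyDec * self)
{
  GstAudioDecoder *dec = GST_AUDIO_DECODER (self);

  gst_audio_decoder_set_use_default_pad_acceptcaps (dec, TRUE);
  GST_PAD_SET_ACCEPT_INTERSECT (GST_AUDIO_DECODER_SINK_PAD (dec));
  GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (dec));
}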