/src/gstreamer/subprojects/gst-plugins-base/gst-libs/gst/audio/gstaudiobasesrc.c
Line | Count | Source |
1 | | /* GStreamer |
2 | | * Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu> |
3 | | * 2005 Wim Taymans <wim@fluendo.com> |
4 | | * |
5 | | * gstaudiobasesrc.c: |
6 | | * |
7 | | * This library is free software; you can redistribute it and/or |
8 | | * modify it under the terms of the GNU Library General Public |
9 | | * License as published by the Free Software Foundation; either |
10 | | * version 2 of the License, or (at your option) any later version. |
11 | | * |
12 | | * This library is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | | * Library General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU Library General Public |
18 | | * License along with this library; if not, write to the |
19 | | * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, |
20 | | * Boston, MA 02110-1301, USA. |
21 | | */ |
22 | | |
23 | | /** |
24 | | * SECTION:gstaudiobasesrc |
25 | | * @title: GstAudioBaseSrc |
26 | | * @short_description: Base class for audio sources |
27 | | * @see_also: #GstAudioSrc, #GstAudioRingBuffer. |
28 | | * |
29 | | * This is the base class for audio sources. Subclasses need to implement the |
30 | | * ::create_ringbuffer vmethod. This base class will then take care of |
31 | | * reading samples from the ringbuffer, synchronisation and flushing. |
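 *
 * A minimal, illustrative sketch of wiring this up in a subclass (the
 * MyAudioSrc / my_audio_src_* and MY_TYPE_RING_BUFFER names are hypothetical
 * and not part of this library):
 *
 * |[<!-- language="C" -->
 * static GstAudioRingBuffer *
 * my_audio_src_create_ringbuffer (GstAudioBaseSrc * src)
 * {
 *   // return a device-specific ringbuffer; this base class becomes its
 *   // parent and reads the captured samples out of it
 *   return g_object_new (MY_TYPE_RING_BUFFER, NULL);
 * }
 *
 * static void
 * my_audio_src_class_init (MyAudioSrcClass * klass)
 * {
 *   GstAudioBaseSrcClass *base_class = GST_AUDIO_BASE_SRC_CLASS (klass);
 *
 *   base_class->create_ringbuffer = my_audio_src_create_ringbuffer;
 * }
 * ]|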
32 | | */ |
33 | | |
34 | | #ifdef HAVE_CONFIG_H |
35 | | # include "config.h" |
36 | | #endif |
37 | | |
38 | | #include <string.h> |
39 | | |
40 | | #include <gst/audio/audio.h> |
41 | | #include "gstaudiobasesrc.h" |
42 | | |
43 | | #include <glib/gi18n-lib.h> |
44 | | |
45 | | GST_DEBUG_CATEGORY_STATIC (gst_audio_base_src_debug); |
46 | | #define GST_CAT_DEFAULT gst_audio_base_src_debug |
47 | | |
48 | | struct _GstAudioBaseSrcPrivate |
49 | | { |
50 | | /* the clock slaving algorithm in use */ |
51 | | GstAudioBaseSrcSlaveMethod slave_method; |
52 | | }; |
53 | | |
54 | | /* BaseAudioSrc signals and args */ |
55 | | enum |
56 | | { |
57 | | /* FILL ME */ |
58 | | LAST_SIGNAL |
59 | | }; |
60 | | |
61 | | /* FIXME: 2.0, handle BUFFER_TIME and LATENCY in nanoseconds */ |
62 | 0 | #define DEFAULT_BUFFER_TIME ((200 * GST_MSECOND) / GST_USECOND) |
63 | 0 | #define DEFAULT_LATENCY_TIME ((10 * GST_MSECOND) / GST_USECOND) |
64 | 0 | #define DEFAULT_ACTUAL_BUFFER_TIME -1 |
65 | 0 | #define DEFAULT_ACTUAL_LATENCY_TIME -1 |
66 | 0 | #define DEFAULT_PROVIDE_CLOCK TRUE |
67 | 0 | #define DEFAULT_SLAVE_METHOD GST_AUDIO_BASE_SRC_SLAVE_SKEW |
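/* With the definitions above the defaults evaluate to 200000 microseconds of
 * buffering and 10000 microseconds of latency; the division by GST_USECOND
 * turns the nanosecond-based GST_MSECOND product into microseconds. */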
68 | | |
69 | | enum |
70 | | { |
71 | | PROP_0, |
72 | | PROP_BUFFER_TIME, |
73 | | PROP_LATENCY_TIME, |
74 | | PROP_ACTUAL_BUFFER_TIME, |
75 | | PROP_ACTUAL_LATENCY_TIME, |
76 | | PROP_PROVIDE_CLOCK, |
77 | | PROP_SLAVE_METHOD, |
78 | | PROP_LAST |
79 | | }; |
80 | | |
81 | | static void |
82 | | _do_init (GType type) |
83 | 0 | { |
84 | 0 | GST_DEBUG_CATEGORY_INIT (gst_audio_base_src_debug, "audiobasesrc", 0, |
85 | 0 | "audiobasesrc element"); |
86 | |
87 | | #ifdef ENABLE_NLS |
88 | | GST_DEBUG ("binding text domain %s to locale dir %s", GETTEXT_PACKAGE, |
89 | | LOCALEDIR); |
90 | | bindtextdomain (GETTEXT_PACKAGE, LOCALEDIR); |
91 | | bind_textdomain_codeset (GETTEXT_PACKAGE, "UTF-8"); |
92 | | #endif /* ENABLE_NLS */ |
93 | 0 | } |
94 | | |
95 | 0 | #define gst_audio_base_src_parent_class parent_class |
96 | | G_DEFINE_TYPE_WITH_CODE (GstAudioBaseSrc, gst_audio_base_src, GST_TYPE_PUSH_SRC, |
97 | | G_ADD_PRIVATE (GstAudioBaseSrc) |
98 | | _do_init (g_define_type_id)); |
99 | | |
100 | | static void gst_audio_base_src_set_property (GObject * object, guint prop_id, |
101 | | const GValue * value, GParamSpec * pspec); |
102 | | static void gst_audio_base_src_get_property (GObject * object, guint prop_id, |
103 | | GValue * value, GParamSpec * pspec); |
104 | | static void gst_audio_base_src_dispose (GObject * object); |
105 | | |
106 | | static GstStateChangeReturn gst_audio_base_src_change_state (GstElement * |
107 | | element, GstStateChange transition); |
108 | | static gboolean gst_audio_base_src_post_message (GstElement * element, |
109 | | GstMessage * message); |
110 | | static GstClock *gst_audio_base_src_provide_clock (GstElement * elem); |
111 | | static GstClockTime gst_audio_base_src_get_time (GstClock * clock, |
112 | | GstAudioBaseSrc * src); |
113 | | |
114 | | static GstFlowReturn gst_audio_base_src_create (GstBaseSrc * bsrc, |
115 | | guint64 offset, guint length, GstBuffer ** buf); |
116 | | |
117 | | static gboolean gst_audio_base_src_event (GstBaseSrc * bsrc, GstEvent * event); |
118 | | static void gst_audio_base_src_get_times (GstBaseSrc * bsrc, |
119 | | GstBuffer * buffer, GstClockTime * start, GstClockTime * end); |
120 | | static gboolean gst_audio_base_src_setcaps (GstBaseSrc * bsrc, GstCaps * caps); |
121 | | static gboolean gst_audio_base_src_query (GstBaseSrc * bsrc, GstQuery * query); |
122 | | static GstCaps *gst_audio_base_src_fixate (GstBaseSrc * bsrc, GstCaps * caps); |
123 | | |
124 | | /* static guint gst_audio_base_src_signals[LAST_SIGNAL] = { 0 }; */ |
125 | | |
126 | | static void |
127 | | gst_audio_base_src_class_init (GstAudioBaseSrcClass * klass) |
128 | 0 | { |
129 | 0 | GObjectClass *gobject_class; |
130 | 0 | GstElementClass *gstelement_class; |
131 | 0 | GstBaseSrcClass *gstbasesrc_class; |
132 | |
133 | 0 | gobject_class = (GObjectClass *) klass; |
134 | 0 | gstelement_class = (GstElementClass *) klass; |
135 | 0 | gstbasesrc_class = (GstBaseSrcClass *) klass; |
136 | |
137 | 0 | gobject_class->set_property = gst_audio_base_src_set_property; |
138 | 0 | gobject_class->get_property = gst_audio_base_src_get_property; |
139 | 0 | gobject_class->dispose = gst_audio_base_src_dispose; |
140 | | |
141 | | /* FIXME: 2.0, handle BUFFER_TIME and LATENCY in nanoseconds */ |
142 | 0 | g_object_class_install_property (gobject_class, PROP_BUFFER_TIME, |
143 | 0 | g_param_spec_int64 ("buffer-time", "Buffer Time", |
144 | 0 | "Size of audio buffer in microseconds. This is the maximum amount " |
145 | 0 | "of data that is buffered in the device and the maximum latency that " |
146 | 0 | "the source reports. This value might be ignored by the element if " |
147 | 0 | "necessary; see \"actual-buffer-time\"", |
148 | 0 | 1, G_MAXINT64, DEFAULT_BUFFER_TIME, |
149 | 0 | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); |
150 | |
151 | 0 | g_object_class_install_property (gobject_class, PROP_LATENCY_TIME, |
152 | 0 | g_param_spec_int64 ("latency-time", "Latency Time", |
153 | 0 | "The minimum amount of data to read in each iteration in " |
154 | 0 | "microseconds. This is the minimum latency that the source reports. " |
155 | 0 | "This value might be ignored by the element if necessary; see " |
156 | 0 | "\"actual-latency-time\"", 1, G_MAXINT64, DEFAULT_LATENCY_TIME, |
157 | 0 | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); |
158 | | |
159 | | /** |
160 | | * GstAudioBaseSrc:actual-buffer-time: |
161 | | * |
162 | | * Actual configured size of audio buffer in microseconds. |
163 | | **/ |
164 | 0 | g_object_class_install_property (gobject_class, PROP_ACTUAL_BUFFER_TIME, |
165 | 0 | g_param_spec_int64 ("actual-buffer-time", "Actual Buffer Time", |
166 | 0 | "Actual configured size of audio buffer in microseconds", |
167 | 0 | DEFAULT_ACTUAL_BUFFER_TIME, G_MAXINT64, DEFAULT_ACTUAL_BUFFER_TIME, |
168 | 0 | G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); |
169 | | |
170 | | /** |
171 | | * GstAudioBaseSrc:actual-latency-time: |
172 | | * |
173 | | * Actual configured audio latency in microseconds. |
174 | | **/ |
175 | 0 | g_object_class_install_property (gobject_class, PROP_ACTUAL_LATENCY_TIME, |
176 | 0 | g_param_spec_int64 ("actual-latency-time", "Actual Latency Time", |
177 | 0 | "Actual configured audio latency in microseconds", |
178 | 0 | DEFAULT_ACTUAL_LATENCY_TIME, G_MAXINT64, DEFAULT_ACTUAL_LATENCY_TIME, |
179 | 0 | G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); |
180 | |
181 | 0 | g_object_class_install_property (gobject_class, PROP_PROVIDE_CLOCK, |
182 | 0 | g_param_spec_boolean ("provide-clock", "Provide Clock", |
183 | 0 | "Provide a clock to be used as the global pipeline clock", |
184 | 0 | DEFAULT_PROVIDE_CLOCK, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); |
185 | |
186 | 0 | g_object_class_install_property (gobject_class, PROP_SLAVE_METHOD, |
187 | 0 | g_param_spec_enum ("slave-method", "Slave Method", |
188 | 0 | "Algorithm used to match the rate of the masterclock", |
189 | 0 | GST_TYPE_AUDIO_BASE_SRC_SLAVE_METHOD, DEFAULT_SLAVE_METHOD, |
190 | 0 | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); |
191 | |
192 | 0 | gstelement_class->change_state = |
193 | 0 | GST_DEBUG_FUNCPTR (gst_audio_base_src_change_state); |
194 | 0 | gstelement_class->provide_clock = |
195 | 0 | GST_DEBUG_FUNCPTR (gst_audio_base_src_provide_clock); |
196 | 0 | gstelement_class->post_message = |
197 | 0 | GST_DEBUG_FUNCPTR (gst_audio_base_src_post_message); |
198 | |
199 | 0 | gstbasesrc_class->set_caps = GST_DEBUG_FUNCPTR (gst_audio_base_src_setcaps); |
200 | 0 | gstbasesrc_class->event = GST_DEBUG_FUNCPTR (gst_audio_base_src_event); |
201 | 0 | gstbasesrc_class->query = GST_DEBUG_FUNCPTR (gst_audio_base_src_query); |
202 | 0 | gstbasesrc_class->get_times = |
203 | 0 | GST_DEBUG_FUNCPTR (gst_audio_base_src_get_times); |
204 | 0 | gstbasesrc_class->create = GST_DEBUG_FUNCPTR (gst_audio_base_src_create); |
205 | 0 | gstbasesrc_class->fixate = GST_DEBUG_FUNCPTR (gst_audio_base_src_fixate); |
206 | | |
207 | | /* ref class from a thread-safe context to work around missing bit of |
208 | | * thread-safety in GObject */ |
209 | 0 | g_type_class_ref (GST_TYPE_AUDIO_CLOCK); |
210 | 0 | g_type_class_ref (GST_TYPE_AUDIO_RING_BUFFER); |
211 | 0 | } |
212 | | |
213 | | static void |
214 | | gst_audio_base_src_init (GstAudioBaseSrc * audiobasesrc) |
215 | 0 | { |
216 | 0 | audiobasesrc->priv = gst_audio_base_src_get_instance_private (audiobasesrc); |
217 | |
218 | 0 | audiobasesrc->buffer_time = DEFAULT_BUFFER_TIME; |
219 | 0 | audiobasesrc->latency_time = DEFAULT_LATENCY_TIME; |
220 | 0 | if (DEFAULT_PROVIDE_CLOCK) |
221 | 0 | GST_OBJECT_FLAG_SET (audiobasesrc, GST_ELEMENT_FLAG_PROVIDE_CLOCK); |
222 | 0 | else |
223 | 0 | GST_OBJECT_FLAG_UNSET (audiobasesrc, GST_ELEMENT_FLAG_PROVIDE_CLOCK); |
224 | 0 | audiobasesrc->priv->slave_method = DEFAULT_SLAVE_METHOD; |
225 | | /* reset blocksize; we use the latency-time to calculate a more useful |
226 | | * value based on the negotiated format. */ |
227 | 0 | GST_BASE_SRC (audiobasesrc)->blocksize = 0; |
228 | |
229 | 0 | audiobasesrc->clock = gst_audio_clock_new ("GstAudioSrcClock", |
230 | 0 | (GstAudioClockGetTimeFunc) gst_audio_base_src_get_time, audiobasesrc, |
231 | 0 | NULL); |
232 | | |
233 | | |
234 | | /* we are always a live source */ |
235 | 0 | gst_base_src_set_live (GST_BASE_SRC (audiobasesrc), TRUE); |
236 | | /* we operate in time */ |
237 | 0 | gst_base_src_set_format (GST_BASE_SRC (audiobasesrc), GST_FORMAT_TIME); |
238 | 0 | } |
239 | | |
240 | | static void |
241 | | gst_audio_base_src_dispose (GObject * object) |
242 | 0 | { |
243 | 0 | GstAudioBaseSrc *src; |
244 | |
245 | 0 | src = GST_AUDIO_BASE_SRC (object); |
246 | |
247 | 0 | GST_OBJECT_LOCK (src); |
248 | 0 | if (src->clock) { |
249 | 0 | gst_audio_clock_invalidate (GST_AUDIO_CLOCK (src->clock)); |
250 | 0 | gst_object_unref (src->clock); |
251 | 0 | src->clock = NULL; |
252 | 0 | } |
253 | |
254 | 0 | if (src->ringbuffer) { |
255 | 0 | gst_object_unparent (GST_OBJECT_CAST (src->ringbuffer)); |
256 | 0 | src->ringbuffer = NULL; |
257 | 0 | } |
258 | 0 | GST_OBJECT_UNLOCK (src); |
259 | |
260 | 0 | G_OBJECT_CLASS (parent_class)->dispose (object); |
261 | 0 | } |
262 | | |
263 | | static GstClock * |
264 | | gst_audio_base_src_provide_clock (GstElement * elem) |
265 | 0 | { |
266 | 0 | GstAudioBaseSrc *src; |
267 | 0 | GstClock *clock; |
268 | |
269 | 0 | src = GST_AUDIO_BASE_SRC (elem); |
270 | | |
271 | | /* we have no ringbuffer (must be NULL state) */ |
272 | 0 | if (src->ringbuffer == NULL) |
273 | 0 | goto wrong_state; |
274 | | |
275 | 0 | if (gst_audio_ring_buffer_is_flushing (src->ringbuffer)) |
276 | 0 | goto wrong_state; |
277 | | |
278 | 0 | GST_OBJECT_LOCK (src); |
279 | |
280 | 0 | if (!GST_OBJECT_FLAG_IS_SET (src, GST_ELEMENT_FLAG_PROVIDE_CLOCK)) |
281 | 0 | goto clock_disabled; |
282 | | |
283 | 0 | clock = GST_CLOCK_CAST (gst_object_ref (src->clock)); |
284 | 0 | GST_OBJECT_UNLOCK (src); |
285 | |
286 | 0 | return clock; |
287 | | |
288 | | /* ERRORS */ |
289 | 0 | wrong_state: |
290 | 0 | { |
291 | 0 | GST_DEBUG_OBJECT (src, "ringbuffer is flushing"); |
292 | 0 | return NULL; |
293 | 0 | } |
294 | 0 | clock_disabled: |
295 | 0 | { |
296 | 0 | GST_DEBUG_OBJECT (src, "clock provide disabled"); |
297 | 0 | GST_OBJECT_UNLOCK (src); |
298 | 0 | return NULL; |
299 | 0 | } |
300 | 0 | } |
301 | | |
302 | | static GstClockTime |
303 | | gst_audio_base_src_get_time (GstClock * clock, GstAudioBaseSrc * src) |
304 | 0 | { |
305 | 0 | guint64 raw, samples; |
306 | 0 | guint delay; |
307 | 0 | GstClockTime result; |
308 | 0 | GstAudioRingBuffer *ringbuffer; |
309 | 0 | gint rate; |
310 | |
311 | 0 | ringbuffer = src->ringbuffer; |
312 | 0 | if (!ringbuffer) |
313 | 0 | return GST_CLOCK_TIME_NONE; |
314 | | |
315 | 0 | rate = ringbuffer->spec.info.rate; |
316 | 0 | if (rate == 0) |
317 | 0 | return GST_CLOCK_TIME_NONE; |
318 | | |
319 | 0 | raw = samples = gst_audio_ring_buffer_samples_done (ringbuffer); |
320 | | |
321 | | /* the number of samples not yet processed; these are still queued in the |
322 | | * device (not yet read for capture). */ |
323 | 0 | delay = gst_audio_ring_buffer_delay (ringbuffer); |
324 | |
325 | 0 | samples += delay; |
326 | |
327 | 0 | result = gst_util_uint64_scale_int (samples, GST_SECOND, rate); |
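  /* Illustrative numbers (not from a real run): 88200 samples read out plus
   * 441 samples still delayed in the device at 44100 Hz gives
   * (88200 + 441) * GST_SECOND / 44100 = 2.01 seconds of clock time. */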
328 | |
329 | 0 | GST_DEBUG_OBJECT (src, |
330 | 0 | "processed samples: raw %" G_GUINT64_FORMAT ", delay %u, real %" |
331 | 0 | G_GUINT64_FORMAT ", time %" GST_TIME_FORMAT, raw, delay, samples, |
332 | 0 | GST_TIME_ARGS (result)); |
333 | |
334 | 0 | return result; |
335 | 0 | } |
336 | | |
337 | | /** |
338 | | * gst_audio_base_src_set_provide_clock: |
339 | | * @src: a #GstAudioBaseSrc |
340 | | * @provide: new state |
341 | | * |
342 | | * Controls whether @src will provide a clock or not. If @provide is %TRUE, |
343 | | * gst_element_provide_clock() will return a clock that reflects the datarate |
344 | | * of @src. If @provide is %FALSE, gst_element_provide_clock() will return NULL. |
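 *
 * For example, an application that wants the pipeline to run on some other
 * clock could disable clock provision on its (hypothetical) source element
 * asrc before going to PLAYING:
 * |[<!-- language="C" -->
 * gst_audio_base_src_set_provide_clock (GST_AUDIO_BASE_SRC (asrc), FALSE);
 * ]|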
345 | | */ |
346 | | void |
347 | | gst_audio_base_src_set_provide_clock (GstAudioBaseSrc * src, gboolean provide) |
348 | 0 | { |
349 | 0 | g_return_if_fail (GST_IS_AUDIO_BASE_SRC (src)); |
350 | | |
351 | 0 | GST_OBJECT_LOCK (src); |
352 | 0 | if (provide) |
353 | 0 | GST_OBJECT_FLAG_SET (src, GST_ELEMENT_FLAG_PROVIDE_CLOCK); |
354 | 0 | else |
355 | 0 | GST_OBJECT_FLAG_UNSET (src, GST_ELEMENT_FLAG_PROVIDE_CLOCK); |
356 | 0 | GST_OBJECT_UNLOCK (src); |
357 | 0 | } |
358 | | |
359 | | /** |
360 | | * gst_audio_base_src_get_provide_clock: |
361 | | * @src: a #GstAudioBaseSrc |
362 | | * |
363 | | * Queries whether @src will provide a clock or not. See also |
364 | | * gst_audio_base_src_set_provide_clock(). |
365 | | * |
366 | | * Returns: %TRUE if @src will provide a clock. |
367 | | */ |
368 | | gboolean |
369 | | gst_audio_base_src_get_provide_clock (GstAudioBaseSrc * src) |
370 | 0 | { |
371 | 0 | gboolean result; |
372 | |
373 | 0 | g_return_val_if_fail (GST_IS_AUDIO_BASE_SRC (src), FALSE); |
374 | | |
375 | 0 | GST_OBJECT_LOCK (src); |
376 | 0 | result = GST_OBJECT_FLAG_IS_SET (src, GST_ELEMENT_FLAG_PROVIDE_CLOCK); |
377 | 0 | GST_OBJECT_UNLOCK (src); |
378 | |
379 | 0 | return result; |
380 | 0 | } |
381 | | |
382 | | /** |
383 | | * gst_audio_base_src_set_slave_method: |
384 | | * @src: a #GstAudioBaseSrc |
385 | | * @method: the new slave method |
386 | | * |
387 | | * Controls how clock slaving will be performed in @src. |
388 | | */ |
389 | | void |
390 | | gst_audio_base_src_set_slave_method (GstAudioBaseSrc * src, |
391 | | GstAudioBaseSrcSlaveMethod method) |
392 | 0 | { |
393 | 0 | g_return_if_fail (GST_IS_AUDIO_BASE_SRC (src)); |
394 | | |
395 | 0 | GST_OBJECT_LOCK (src); |
396 | 0 | src->priv->slave_method = method; |
397 | 0 | GST_OBJECT_UNLOCK (src); |
398 | 0 | } |
399 | | |
400 | | /** |
401 | | * gst_audio_base_src_get_slave_method: |
402 | | * @src: a #GstAudioBaseSrc |
403 | | * |
404 | | * Get the current slave method used by @src. |
405 | | * |
406 | | * Returns: The current slave method used by @src. |
407 | | */ |
408 | | GstAudioBaseSrcSlaveMethod |
409 | | gst_audio_base_src_get_slave_method (GstAudioBaseSrc * src) |
410 | 0 | { |
411 | 0 | GstAudioBaseSrcSlaveMethod result; |
412 | |
413 | 0 | g_return_val_if_fail (GST_IS_AUDIO_BASE_SRC (src), -1); |
414 | | |
415 | 0 | GST_OBJECT_LOCK (src); |
416 | 0 | result = src->priv->slave_method; |
417 | 0 | GST_OBJECT_UNLOCK (src); |
418 | |
419 | 0 | return result; |
420 | 0 | } |
421 | | |
422 | | static void |
423 | | gst_audio_base_src_set_property (GObject * object, guint prop_id, |
424 | | const GValue * value, GParamSpec * pspec) |
425 | 0 | { |
426 | 0 | GstAudioBaseSrc *src; |
427 | |
428 | 0 | src = GST_AUDIO_BASE_SRC (object); |
429 | |
430 | 0 | switch (prop_id) { |
431 | 0 | case PROP_BUFFER_TIME: |
432 | 0 | src->buffer_time = g_value_get_int64 (value); |
433 | 0 | break; |
434 | 0 | case PROP_LATENCY_TIME: |
435 | 0 | src->latency_time = g_value_get_int64 (value); |
436 | 0 | break; |
437 | 0 | case PROP_PROVIDE_CLOCK: |
438 | 0 | gst_audio_base_src_set_provide_clock (src, g_value_get_boolean (value)); |
439 | 0 | break; |
440 | 0 | case PROP_SLAVE_METHOD: |
441 | 0 | gst_audio_base_src_set_slave_method (src, g_value_get_enum (value)); |
442 | 0 | break; |
443 | 0 | default: |
444 | 0 | G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); |
445 | 0 | break; |
446 | 0 | } |
447 | 0 | } |
448 | | |
449 | | static void |
450 | | gst_audio_base_src_get_property (GObject * object, guint prop_id, |
451 | | GValue * value, GParamSpec * pspec) |
452 | 0 | { |
453 | 0 | GstAudioBaseSrc *src; |
454 | |
455 | 0 | src = GST_AUDIO_BASE_SRC (object); |
456 | |
457 | 0 | switch (prop_id) { |
458 | 0 | case PROP_BUFFER_TIME: |
459 | 0 | g_value_set_int64 (value, src->buffer_time); |
460 | 0 | break; |
461 | 0 | case PROP_LATENCY_TIME: |
462 | 0 | g_value_set_int64 (value, src->latency_time); |
463 | 0 | break; |
464 | 0 | case PROP_ACTUAL_BUFFER_TIME: |
465 | 0 | GST_OBJECT_LOCK (src); |
466 | 0 | if (src->ringbuffer && src->ringbuffer->acquired) |
467 | 0 | g_value_set_int64 (value, src->ringbuffer->spec.buffer_time); |
468 | 0 | else |
469 | 0 | g_value_set_int64 (value, DEFAULT_ACTUAL_BUFFER_TIME); |
470 | 0 | GST_OBJECT_UNLOCK (src); |
471 | 0 | break; |
472 | 0 | case PROP_ACTUAL_LATENCY_TIME: |
473 | 0 | GST_OBJECT_LOCK (src); |
474 | 0 | if (src->ringbuffer && src->ringbuffer->acquired) |
475 | 0 | g_value_set_int64 (value, src->ringbuffer->spec.latency_time); |
476 | 0 | else |
477 | 0 | g_value_set_int64 (value, DEFAULT_ACTUAL_LATENCY_TIME); |
478 | 0 | GST_OBJECT_UNLOCK (src); |
479 | 0 | break; |
480 | 0 | case PROP_PROVIDE_CLOCK: |
481 | 0 | g_value_set_boolean (value, gst_audio_base_src_get_provide_clock (src)); |
482 | 0 | break; |
483 | 0 | case PROP_SLAVE_METHOD: |
484 | 0 | g_value_set_enum (value, gst_audio_base_src_get_slave_method (src)); |
485 | 0 | break; |
486 | 0 | default: |
487 | 0 | G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); |
488 | 0 | break; |
489 | 0 | } |
490 | 0 | } |
491 | | |
492 | | static GstCaps * |
493 | | gst_audio_base_src_fixate (GstBaseSrc * bsrc, GstCaps * caps) |
494 | 0 | { |
495 | 0 | GstStructure *s; |
496 | |
497 | 0 | caps = gst_caps_make_writable (caps); |
498 | |
499 | 0 | s = gst_caps_get_structure (caps, 0); |
500 | | |
501 | | /* fields for all formats */ |
502 | 0 | gst_structure_fixate_field_nearest_int (s, "rate", GST_AUDIO_DEF_RATE); |
503 | 0 | gst_structure_fixate_field_nearest_int (s, "channels", |
504 | 0 | GST_AUDIO_DEF_CHANNELS); |
505 | 0 | gst_structure_fixate_field_string (s, "format", GST_AUDIO_DEF_FORMAT); |
506 | |
507 | 0 | caps = GST_BASE_SRC_CLASS (parent_class)->fixate (bsrc, caps); |
508 | |
509 | 0 | return caps; |
510 | 0 | } |
511 | | |
512 | | static gboolean |
513 | | gst_audio_base_src_setcaps (GstBaseSrc * bsrc, GstCaps * caps) |
514 | 0 | { |
515 | 0 | GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (bsrc); |
516 | 0 | GstAudioRingBufferSpec *spec; |
517 | 0 | gint bpf, rate; |
518 | |
519 | 0 | spec = &src->ringbuffer->spec; |
520 | |
521 | 0 | if (G_UNLIKELY (gst_audio_ring_buffer_is_acquired (src->ringbuffer) |
522 | 0 | && gst_caps_is_equal (spec->caps, caps))) { |
523 | 0 | GST_DEBUG_OBJECT (src, |
524 | 0 | "Ringbuffer caps haven't changed, skipping reconfiguration"); |
525 | 0 | return TRUE; |
526 | 0 | } |
527 | | |
528 | 0 | GST_DEBUG ("release old ringbuffer"); |
529 | 0 | gst_audio_ring_buffer_release (src->ringbuffer); |
530 | |
531 | 0 | spec->buffer_time = src->buffer_time; |
532 | 0 | spec->latency_time = src->latency_time; |
533 | |
534 | 0 | GST_OBJECT_LOCK (src); |
535 | 0 | if (!gst_audio_ring_buffer_parse_caps (spec, caps)) { |
536 | 0 | GST_OBJECT_UNLOCK (src); |
537 | 0 | goto parse_error; |
538 | 0 | } |
539 | | |
540 | 0 | bpf = GST_AUDIO_INFO_BPF (&spec->info); |
541 | 0 | rate = GST_AUDIO_INFO_RATE (&spec->info); |
542 | | |
543 | | /* calculate suggested segsize and segtotal */ |
544 | 0 | spec->segsize = rate * bpf * spec->latency_time / GST_MSECOND; |
545 | | /* Round to an integer number of samples */ |
546 | 0 | spec->segsize -= spec->segsize % bpf; |
547 | 0 | spec->segtotal = spec->buffer_time / spec->latency_time; |
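  /* Illustrative numbers (assumed, not computed here): at 44100 Hz with
   * 4 bytes per frame, latency-time 10000 us and buffer-time 200000 us this
   * gives segsize = 44100 * 4 * 10000 / 1000000 = 1764 bytes (10 ms) and
   * segtotal = 200000 / 10000 = 20 segments (200 ms of buffering). */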
548 | |
549 | 0 | GST_OBJECT_UNLOCK (src); |
550 | |
551 | 0 | gst_audio_ring_buffer_debug_spec_buff (spec); |
552 | |
553 | 0 | GST_DEBUG ("acquire new ringbuffer"); |
554 | |
555 | 0 | if (!gst_audio_ring_buffer_acquire (src->ringbuffer, spec)) |
556 | 0 | goto acquire_error; |
557 | | |
558 | | /* calculate actual latency and buffer times */ |
559 | 0 | spec->latency_time = spec->segsize * GST_MSECOND / (rate * bpf); |
560 | 0 | spec->buffer_time = |
561 | 0 | spec->segtotal * spec->segsize * GST_MSECOND / (rate * bpf); |
562 | |
563 | 0 | gst_audio_ring_buffer_debug_spec_buff (spec); |
564 | |
565 | 0 | g_object_notify (G_OBJECT (src), "actual-buffer-time"); |
566 | 0 | g_object_notify (G_OBJECT (src), "actual-latency-time"); |
567 | |
568 | 0 | gst_element_post_message (GST_ELEMENT_CAST (bsrc), |
569 | 0 | gst_message_new_latency (GST_OBJECT (bsrc))); |
570 | |
571 | 0 | return TRUE; |
572 | | |
573 | | /* ERRORS */ |
574 | 0 | parse_error: |
575 | 0 | { |
576 | 0 | GST_DEBUG ("could not parse caps"); |
577 | 0 | return FALSE; |
578 | 0 | } |
579 | 0 | acquire_error: |
580 | 0 | { |
581 | 0 | GST_DEBUG ("could not acquire ringbuffer"); |
582 | 0 | return FALSE; |
583 | 0 | } |
584 | 0 | } |
585 | | |
586 | | static void |
587 | | gst_audio_base_src_get_times (GstBaseSrc * bsrc, GstBuffer * buffer, |
588 | | GstClockTime * start, GstClockTime * end) |
589 | 0 | { |
590 | | /* No need to sync to a clock here. We schedule the samples based |
591 | | * on our own clock for the moment. */ |
592 | 0 | *start = GST_CLOCK_TIME_NONE; |
593 | 0 | *end = GST_CLOCK_TIME_NONE; |
594 | 0 | } |
595 | | |
596 | | static gboolean |
597 | | gst_audio_base_src_query (GstBaseSrc * bsrc, GstQuery * query) |
598 | 0 | { |
599 | 0 | GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (bsrc); |
600 | 0 | gboolean res = FALSE; |
601 | |
602 | 0 | switch (GST_QUERY_TYPE (query)) { |
603 | 0 | case GST_QUERY_LATENCY: |
604 | 0 | { |
605 | 0 | GstClockTime min_latency, max_latency; |
606 | 0 | GstAudioRingBufferSpec *spec; |
607 | 0 | gint bpf, rate; |
608 | |
609 | 0 | GST_OBJECT_LOCK (src); |
610 | 0 | if (G_UNLIKELY (src->ringbuffer == NULL |
611 | 0 | || src->ringbuffer->spec.info.rate == 0)) { |
612 | 0 | GST_OBJECT_UNLOCK (src); |
613 | 0 | goto done; |
614 | 0 | } |
615 | | |
616 | 0 | spec = &src->ringbuffer->spec; |
617 | 0 | rate = GST_AUDIO_INFO_RATE (&spec->info); |
618 | 0 | bpf = GST_AUDIO_INFO_BPF (&spec->info); |
619 | | |
620 | | /* we have at least 1 segment of latency */ |
621 | 0 | min_latency = |
622 | 0 | gst_util_uint64_scale_int (spec->segsize, GST_SECOND, rate * bpf); |
623 | | /* we cannot delay more than the buffersize else we lose data */ |
624 | 0 | max_latency = |
625 | 0 | gst_util_uint64_scale_int (spec->segtotal * spec->segsize, GST_SECOND, |
626 | 0 | rate * bpf); |
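      /* Illustrative numbers: with segsize 1764 bytes, segtotal 20, 44100 Hz
       * and 4 bytes per frame this reports a minimum latency of 10 ms and a
       * maximum latency of 200 ms. */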
627 | 0 | GST_OBJECT_UNLOCK (src); |
628 | |
629 | 0 | GST_DEBUG_OBJECT (src, |
630 | 0 | "report latency min %" GST_TIME_FORMAT " max %" GST_TIME_FORMAT, |
631 | 0 | GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency)); |
632 | | |
633 | | /* we are always live, the min latency is 1 segment and the max latency is |
634 | | * the complete buffer of segments. */ |
635 | 0 | gst_query_set_latency (query, TRUE, min_latency, max_latency); |
636 | |
637 | 0 | res = TRUE; |
638 | 0 | break; |
639 | 0 | } |
640 | 0 | case GST_QUERY_SCHEDULING: |
641 | 0 | { |
642 | | /* We allow limited pull-based operation. Basically, pulling can be |
643 | | * done on any number of bytes as long as the offset is -1 or |
644 | | * sequentially increasing. */ |
645 | 0 | gst_query_set_scheduling (query, GST_SCHEDULING_FLAG_SEQUENTIAL, 1, -1, |
646 | 0 | 0); |
647 | 0 | gst_query_add_scheduling_mode (query, GST_PAD_MODE_PULL); |
648 | 0 | gst_query_add_scheduling_mode (query, GST_PAD_MODE_PUSH); |
649 | |
650 | 0 | res = TRUE; |
651 | 0 | break; |
652 | 0 | } |
653 | 0 | default: |
654 | 0 | res = GST_BASE_SRC_CLASS (parent_class)->query (bsrc, query); |
655 | 0 | break; |
656 | 0 | } |
657 | 0 | done: |
658 | 0 | return res; |
659 | 0 | } |
660 | | |
661 | | static gboolean |
662 | | gst_audio_base_src_event (GstBaseSrc * bsrc, GstEvent * event) |
663 | 0 | { |
664 | 0 | GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (bsrc); |
665 | 0 | gboolean res, forward; |
666 | |
667 | 0 | res = FALSE; |
668 | 0 | forward = TRUE; |
669 | |
670 | 0 | switch (GST_EVENT_TYPE (event)) { |
671 | 0 | case GST_EVENT_FLUSH_START: |
672 | 0 | GST_DEBUG_OBJECT (bsrc, "flush-start"); |
673 | 0 | gst_audio_ring_buffer_pause (src->ringbuffer); |
674 | 0 | gst_audio_ring_buffer_clear_all (src->ringbuffer); |
675 | 0 | break; |
676 | 0 | case GST_EVENT_FLUSH_STOP: |
677 | 0 | GST_DEBUG_OBJECT (bsrc, "flush-stop"); |
678 | | /* always resync on sample after a flush */ |
679 | 0 | src->next_sample = -1; |
680 | 0 | gst_audio_ring_buffer_clear_all (src->ringbuffer); |
681 | 0 | break; |
682 | 0 | case GST_EVENT_SEEK: |
683 | 0 | GST_DEBUG_OBJECT (bsrc, "refuse to seek"); |
684 | 0 | forward = FALSE; |
685 | 0 | break; |
686 | 0 | default: |
687 | 0 | GST_DEBUG_OBJECT (bsrc, "forward event %p", event); |
688 | 0 | break; |
689 | 0 | } |
690 | 0 | if (forward) |
691 | 0 | res = GST_BASE_SRC_CLASS (parent_class)->event (bsrc, event); |
692 | |
693 | 0 | return res; |
694 | 0 | } |
695 | | |
696 | | /* Get the next offset in the ringbuffer for reading samples. |
697 | | * If the next sample is too far away, this function will reposition itself to |
698 | | * the most recent sample available, creating a discontinuity */ |
699 | | static guint64 |
700 | | gst_audio_base_src_get_offset (GstAudioBaseSrc * src) |
701 | 0 | { |
702 | 0 | guint64 sample; |
703 | 0 | guint64 readseg, segdone; |
704 | 0 | gint segtotal, sps; |
705 | 0 | gint64 diff; |
706 | | |
707 | | /* assume we can append to the previous sample */ |
708 | 0 | sample = src->next_sample; |
709 | |
710 | 0 | sps = src->ringbuffer->samples_per_seg; |
711 | 0 | segtotal = src->ringbuffer->spec.segtotal; |
712 | | |
713 | | /* get the currently processed segment */ |
714 | 0 | segdone = gst_audio_ring_buffer_get_segdone (src->ringbuffer) |
715 | 0 | - gst_audio_ring_buffer_get_segbase (src->ringbuffer); |
716 | |
717 | 0 | if (sample != -1) { |
718 | 0 | GST_DEBUG_OBJECT (src, |
719 | 0 | "at segment %" G_GUINT64_FORMAT " and sample %" G_GUINT64_FORMAT, |
720 | 0 | segdone, sample); |
721 | | /* figure out the segment and the offset inside the segment where |
722 | | * the sample should be read from. */ |
723 | 0 | readseg = sample / sps; |
724 | | |
725 | | /* See how far away it is from the read segment. Normally, segdone (where |
726 | | * new data is written in the ringbuffer) is bigger than readseg |
727 | | * (where we are reading). */ |
728 | 0 | diff = segdone - readseg; |
729 | 0 | if (diff >= segtotal) { |
730 | 0 | GST_DEBUG_OBJECT (src, "dropped, align to segment %" G_GUINT64_FORMAT, |
731 | 0 | segdone); |
732 | | /* sample would be dropped, position to next playable position */ |
733 | 0 | sample = ((guint64) (segdone)) * sps; |
734 | 0 | } |
735 | 0 | } else { |
736 | | /* no previous sample, go to the current position */ |
737 | 0 | GST_DEBUG_OBJECT (src, "first sample, align to current %" G_GUINT64_FORMAT, |
738 | 0 | segdone); |
739 | 0 | sample = segdone * sps; |
740 | 0 | readseg = segdone; |
741 | 0 | } |
742 | |
743 | 0 | GST_DEBUG_OBJECT (src, |
744 | 0 | "reading from %" G_GUINT64_FORMAT ", we are at %" G_GUINT64_FORMAT |
745 | 0 | ", sample %" G_GUINT64_FORMAT, readseg, segdone, sample); |
746 | |
747 | 0 | return sample; |
748 | 0 | } |
749 | | |
750 | | static GstFlowReturn |
751 | | gst_audio_base_src_create (GstBaseSrc * bsrc, guint64 offset, guint length, |
752 | | GstBuffer ** outbuf) |
753 | 0 | { |
754 | 0 | GstFlowReturn ret; |
755 | 0 | GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (bsrc); |
756 | 0 | GstBuffer *buf; |
757 | 0 | GstMapInfo info; |
758 | 0 | guint8 *ptr; |
759 | 0 | guint samples, total_samples; |
760 | 0 | guint64 sample; |
761 | 0 | gint bpf, rate; |
762 | 0 | GstAudioRingBuffer *ringbuffer; |
763 | 0 | GstAudioRingBufferSpec *spec; |
764 | 0 | guint read; |
765 | 0 | GstClockTime timestamp, duration; |
766 | 0 | GstClockTime rb_timestamp = GST_CLOCK_TIME_NONE; |
767 | 0 | GstClock *clock; |
768 | 0 | gboolean first; |
769 | 0 | gboolean first_sample = src->next_sample == -1; |
770 | |
771 | 0 | ringbuffer = src->ringbuffer; |
772 | 0 | spec = &ringbuffer->spec; |
773 | |
774 | 0 | if (G_UNLIKELY (!gst_audio_ring_buffer_is_acquired (ringbuffer))) |
775 | 0 | goto wrong_state; |
776 | | |
777 | 0 | bpf = GST_AUDIO_INFO_BPF (&spec->info); |
778 | 0 | rate = GST_AUDIO_INFO_RATE (&spec->info); |
779 | |
780 | 0 | if ((length == 0 && bsrc->blocksize == 0) || length == -1) |
781 | | /* no length given, use the default segment size */ |
782 | 0 | length = spec->segsize; |
783 | 0 | else |
784 | | /* make sure we round down to an integral number of samples */ |
785 | 0 | length -= length % bpf; |
786 | | |
787 | | /* figure out the offset in the ringbuffer */ |
788 | 0 | if (G_UNLIKELY (offset != -1)) { |
789 | 0 | sample = offset / bpf; |
790 | | /* if a specific offset was given it must be the next sequential |
791 | | * offset we expect or we fail for now. */ |
792 | 0 | if (src->next_sample != -1 && sample != src->next_sample) |
793 | 0 | goto wrong_offset; |
794 | 0 | } else { |
795 | | /* Calculate the sequentially-next sample we need to read. This can jump and |
796 | | * create a DISCONT. */ |
797 | 0 | sample = gst_audio_base_src_get_offset (src); |
798 | 0 | } |
799 | | |
800 | 0 | GST_DEBUG_OBJECT (src, "reading from sample %" G_GUINT64_FORMAT " length %u", |
801 | 0 | sample, length); |
802 | | |
803 | | /* get the number of samples to read */ |
804 | 0 | total_samples = samples = length / bpf; |
805 | | |
806 | | /* use the basesrc allocation code to use bufferpools or custom allocators */ |
807 | 0 | ret = GST_BASE_SRC_CLASS (parent_class)->alloc (bsrc, offset, length, &buf); |
808 | 0 | if (G_UNLIKELY (ret != GST_FLOW_OK)) |
809 | 0 | goto alloc_failed; |
810 | | |
811 | 0 | gst_buffer_map (buf, &info, GST_MAP_WRITE); |
812 | 0 | ptr = info.data; |
813 | 0 | first = TRUE; |
814 | 0 | do { |
815 | 0 | GstClockTime tmp_ts = GST_CLOCK_TIME_NONE; |
816 | |
817 | 0 | read = |
818 | 0 | gst_audio_ring_buffer_read (ringbuffer, sample, ptr, samples, &tmp_ts); |
819 | 0 | if (first && GST_CLOCK_TIME_IS_VALID (tmp_ts)) { |
820 | 0 | first = FALSE; |
821 | 0 | rb_timestamp = tmp_ts; |
822 | 0 | } |
823 | 0 | GST_DEBUG_OBJECT (src, "read %u of %u", read, samples); |
824 | | /* if we read all, we're done */ |
825 | 0 | if (read == samples) |
826 | 0 | break; |
827 | | |
828 | 0 | if (g_atomic_int_get (&ringbuffer->state) == |
829 | 0 | GST_AUDIO_RING_BUFFER_STATE_ERROR) |
830 | 0 | goto got_error; |
831 | | |
832 | | /* else something interrupted us and we wait for playing again. */ |
833 | 0 | GST_DEBUG_OBJECT (src, "wait playing"); |
834 | 0 | if (gst_base_src_wait_playing (bsrc) != GST_FLOW_OK) |
835 | 0 | goto stopped; |
836 | | |
837 | 0 | GST_DEBUG_OBJECT (src, "continue playing"); |
838 | | |
839 | | /* read next samples */ |
840 | 0 | sample += read; |
841 | 0 | samples -= read; |
842 | 0 | ptr += read * bpf; |
843 | 0 | } while (TRUE); |
844 | 0 | gst_buffer_unmap (buf, &info); |
845 | | |
846 | | /* mark discontinuity if needed */ |
847 | 0 | if (G_UNLIKELY (sample != src->next_sample) && src->next_sample != -1) { |
848 | 0 | GST_WARNING_OBJECT (src, |
849 | 0 | "create DISCONT of %" G_GUINT64_FORMAT " samples at sample %" |
850 | 0 | G_GUINT64_FORMAT, sample - src->next_sample, sample); |
851 | 0 | GST_ELEMENT_WARNING (src, CORE, CLOCK, |
852 | 0 | (_("Can't record audio fast enough")), |
853 | 0 | ("Dropped %" G_GUINT64_FORMAT " samples. This is most likely because " |
854 | 0 | "downstream can't keep up and is consuming samples too slowly.", |
855 | 0 | sample - src->next_sample)); |
856 | 0 | GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT); |
857 | 0 | } |
858 | |
859 | 0 | src->next_sample = sample + samples; |
860 | | |
861 | | /* get the normal timestamp to get the duration. */ |
862 | 0 | timestamp = gst_util_uint64_scale_int (sample, GST_SECOND, rate); |
863 | 0 | duration = gst_util_uint64_scale_int (src->next_sample, GST_SECOND, |
864 | 0 | rate) - timestamp; |
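  /* Illustrative numbers: at 44100 Hz, sample 88200 with 441 samples read
   * gives a timestamp of 2 seconds and a duration of 10 milliseconds. */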
865 | |
866 | 0 | GST_OBJECT_LOCK (src); |
867 | 0 | if (!(clock = GST_ELEMENT_CLOCK (src))) |
868 | 0 | goto no_sync; |
869 | | |
870 | 0 | if (!GST_CLOCK_TIME_IS_VALID (rb_timestamp) && clock != src->clock) { |
871 | | /* we are slaved, check how to handle this */ |
872 | 0 | switch (src->priv->slave_method) { |
873 | 0 | case GST_AUDIO_BASE_SRC_SLAVE_RESAMPLE: |
874 | | /* Not implemented, use skew algorithm. This algorithm should |
875 | | * work on the readout pointer and produce more or less samples based |
876 | | * on the clock drift */ |
877 | 0 | case GST_AUDIO_BASE_SRC_SLAVE_SKEW: |
878 | 0 | { |
879 | 0 | GstClockTime running_time; |
880 | 0 | GstClockTime base_time; |
881 | 0 | GstClockTime current_time; |
882 | 0 | guint64 running_time_sample; |
883 | 0 | guint64 running_time_segment; |
884 | 0 | guint64 last_read_segment; |
885 | 0 | gint64 segment_skew; |
886 | 0 | gint sps; |
887 | 0 | guint64 segments_written; |
888 | 0 | guint64 last_written_segment; |
889 | | |
890 | | /* get the amount of segments written from the device by now */ |
891 | 0 | segments_written = gst_audio_ring_buffer_get_segdone (src->ringbuffer); |
892 | | |
893 | | /* subtract the base to segments_written to get the number of the |
894 | | * last written segment in the ringbuffer |
895 | | * (one segment written = segment 0) */ |
896 | 0 | last_written_segment = |
897 | 0 | segments_written - gst_audio_ring_buffer_get_segbase (ringbuffer) - |
898 | 0 | 1; |
899 | | |
900 | | /* samples per segment */ |
901 | 0 | sps = ringbuffer->samples_per_seg; |
902 | | |
903 | | /* get the current time */ |
904 | 0 | current_time = gst_clock_get_time (clock); |
905 | | |
906 | | /* get the basetime */ |
907 | 0 | base_time = GST_ELEMENT_CAST (src)->base_time; |
908 | | |
909 | | /* get the running_time */ |
910 | 0 | running_time = current_time - base_time; |
911 | | |
912 | | /* the running_time converted to a sample |
913 | | * (relative to the ringbuffer) */ |
914 | 0 | running_time_sample = |
915 | 0 | gst_util_uint64_scale_int (running_time, rate, GST_SECOND); |
916 | | |
917 | | /* the segmentnr corresponding to running_time, round down */ |
918 | 0 | running_time_segment = running_time_sample / sps; |
919 | | |
920 | | /* the segment currently read from the ringbuffer */ |
921 | 0 | last_read_segment = sample / sps; |
922 | | |
923 | | /* the skew we have between running_time and the ringbuffertime |
924 | | * (last written to) */ |
925 | 0 | segment_skew = running_time_segment - last_written_segment; |
926 | |
927 | 0 | GST_DEBUG_OBJECT (bsrc, |
928 | 0 | "\n running_time = %" |
929 | 0 | GST_TIME_FORMAT |
930 | 0 | "\n timestamp = %" |
931 | 0 | GST_TIME_FORMAT |
932 | 0 | "\n running_time_segment = %" |
933 | 0 | G_GUINT64_FORMAT |
934 | 0 | "\n last_written_segment = %" |
935 | 0 | G_GUINT64_FORMAT |
936 | 0 | "\n segment_skew (running time segment - last_written_segment) = %" |
937 | 0 | G_GINT64_FORMAT |
938 | 0 | "\n last_read_segment = %" |
939 | 0 | G_GUINT64_FORMAT, GST_TIME_ARGS (running_time), |
940 | 0 | GST_TIME_ARGS (timestamp), running_time_segment, |
941 | 0 | last_written_segment, segment_skew, last_read_segment); |
942 | | |
943 | | /* Resync the ringbuffer if: |
944 | | * |
945 | | * 1. We are more than the length of the ringbuffer behind. |
946 | | * The length of the ringbuffer then gets to dictate |
947 | | * the threshold for what is considered "too late" |
948 | | * |
949 | | * 2. If this is our first buffer. |
950 | | * We know that we should catch up to running_time |
951 | | * the first time we are run. |
952 | | */ |
953 | 0 | if ((segment_skew >= ringbuffer->spec.segtotal) || |
954 | 0 | (last_read_segment == 0) || first_sample) { |
955 | 0 | guint64 new_read_segment; |
956 | 0 | guint64 segment_diff; |
957 | 0 | guint64 new_sample; |
958 | | |
959 | | /* the difference between running_time and the last written segment */ |
960 | 0 | segment_diff = running_time_segment - last_written_segment; |
961 | | |
962 | | /* advance the ringbuffer */ |
963 | 0 | gst_audio_ring_buffer_advance (ringbuffer, segment_diff); |
964 | | |
965 | | /* we move the new read segment to the last known written segment */ |
966 | 0 | new_read_segment = gst_audio_ring_buffer_get_segdone (src->ringbuffer) |
967 | 0 | - gst_audio_ring_buffer_get_segbase (src->ringbuffer); |
968 | | |
969 | | /* we calculate the new sample value */ |
970 | 0 | new_sample = new_read_segment * sps; |
971 | | |
972 | | /* and get the relative time to this -> our new timestamp */ |
973 | 0 | timestamp = gst_util_uint64_scale_int (new_sample, GST_SECOND, rate); |
974 | | |
975 | | /* we update the next sample accordingly */ |
976 | 0 | src->next_sample = new_sample + samples; |
977 | |
978 | 0 | GST_DEBUG_OBJECT (bsrc, |
979 | 0 | "Timeshifted the ringbuffer with %" G_GINT64_FORMAT " segments: " |
980 | 0 | "Updating the timestamp to %" GST_TIME_FORMAT ", " |
981 | 0 | "and src->next_sample to %" G_GUINT64_FORMAT, segment_diff, |
982 | 0 | GST_TIME_ARGS (timestamp), src->next_sample); |
983 | 0 | } |
984 | 0 | break; |
985 | 0 | } |
986 | 0 | case GST_AUDIO_BASE_SRC_SLAVE_RE_TIMESTAMP: |
987 | 0 | { |
988 | 0 | GstClockTime base_time, latency; |
989 | | |
990 | | /* We are slaved to another clock. Take running time of the pipeline |
991 | | * clock and timestamp against it. Somebody else in the pipeline should |
992 | | * figure out the clock drift. We keep the duration we calculated |
993 | | * above. */ |
994 | 0 | timestamp = gst_clock_get_time (clock); |
995 | 0 | base_time = GST_ELEMENT_CAST (src)->base_time; |
996 | |
997 | 0 | if (GST_CLOCK_DIFF (timestamp, base_time) < 0) |
998 | 0 | timestamp -= base_time; |
999 | 0 | else |
1000 | 0 | timestamp = 0; |
1001 | | |
1002 | | /* subtract latency */ |
1003 | 0 | latency = gst_util_uint64_scale_int (total_samples, GST_SECOND, rate); |
1004 | 0 | if (timestamp > latency) |
1005 | 0 | timestamp -= latency; |
1006 | 0 | else |
1007 | 0 | timestamp = 0; |
1008 | 0 | } |
1009 | 0 | case GST_AUDIO_BASE_SRC_SLAVE_NONE: |
1010 | 0 | break; |
1011 | 0 | } |
1012 | 0 | } else { |
1013 | 0 | GstClockTime base_time; |
1014 | |
1015 | 0 | if (GST_CLOCK_TIME_IS_VALID (rb_timestamp)) { |
1016 | | /* the read method returned a timestamp so we use this instead */ |
1017 | 0 | timestamp = rb_timestamp; |
1018 | 0 | } else { |
1019 | | /* to get the timestamp against the clock we also need to add our |
1020 | | * offset */ |
1021 | 0 | timestamp = gst_audio_clock_adjust (GST_AUDIO_CLOCK (clock), timestamp); |
1022 | 0 | } |
1023 | | |
1024 | | /* we are not slaved, subtract base_time */ |
1025 | 0 | base_time = GST_ELEMENT_CAST (src)->base_time; |
1026 | |
1027 | 0 | if (GST_CLOCK_DIFF (timestamp, base_time) < 0) { |
1028 | 0 | timestamp -= base_time; |
1029 | 0 | GST_LOG_OBJECT (src, |
1030 | 0 | "buffer timestamp %" GST_TIME_FORMAT " (base_time %" GST_TIME_FORMAT |
1031 | 0 | ")", GST_TIME_ARGS (timestamp), GST_TIME_ARGS (base_time)); |
1032 | 0 | } else { |
1033 | 0 | GST_LOG_OBJECT (src, |
1034 | 0 | "buffer timestamp 0, ts %" GST_TIME_FORMAT " <= base_time %" |
1035 | 0 | GST_TIME_FORMAT, GST_TIME_ARGS (timestamp), |
1036 | 0 | GST_TIME_ARGS (base_time)); |
1037 | 0 | timestamp = 0; |
1038 | 0 | } |
1039 | 0 | } |
1040 | | |
1041 | 0 | no_sync: |
1042 | 0 | GST_OBJECT_UNLOCK (src); |
1043 | |
1044 | 0 | GST_BUFFER_PTS (buf) = timestamp; |
1045 | 0 | GST_BUFFER_DURATION (buf) = duration; |
1046 | 0 | GST_BUFFER_OFFSET (buf) = sample; |
1047 | 0 | GST_BUFFER_OFFSET_END (buf) = sample + samples; |
1048 | |
1049 | 0 | *outbuf = buf; |
1050 | |
1051 | 0 | GST_LOG_OBJECT (src, "Pushed buffer timestamp %" GST_TIME_FORMAT, |
1052 | 0 | GST_TIME_ARGS (GST_BUFFER_PTS (buf))); |
1053 | |
1054 | 0 | return GST_FLOW_OK; |
1055 | | |
1056 | | /* ERRORS */ |
1057 | 0 | wrong_state: |
1058 | 0 | { |
1059 | 0 | GST_DEBUG_OBJECT (src, "ringbuffer in wrong state"); |
1060 | 0 | return GST_FLOW_FLUSHING; |
1061 | 0 | } |
1062 | 0 | wrong_offset: |
1063 | 0 | { |
1064 | 0 | GST_ELEMENT_ERROR (src, RESOURCE, SEEK, |
1065 | 0 | (NULL), ("resource can only be operated on sequentially but offset %" |
1066 | 0 | G_GUINT64_FORMAT " was given", offset)); |
1067 | 0 | return GST_FLOW_ERROR; |
1068 | 0 | } |
1069 | 0 | alloc_failed: |
1070 | 0 | { |
1071 | 0 | GST_DEBUG_OBJECT (src, "alloc failed: %s", gst_flow_get_name (ret)); |
1072 | 0 | return ret; |
1073 | 0 | } |
1074 | 0 | stopped: |
1075 | 0 | { |
1076 | 0 | gst_buffer_unmap (buf, &info); |
1077 | 0 | gst_buffer_unref (buf); |
1078 | 0 | GST_DEBUG_OBJECT (src, "ringbuffer stopped"); |
1079 | 0 | return GST_FLOW_FLUSHING; |
1080 | 0 | } |
1081 | 0 | got_error: |
1082 | 0 | { |
1083 | 0 | gst_buffer_unmap (buf, &info); |
1084 | 0 | gst_buffer_unref (buf); |
1085 | 0 | GST_DEBUG_OBJECT (src, "ringbuffer was in error state, bailing out"); |
1086 | 0 | return GST_FLOW_ERROR; |
1087 | 0 | } |
1088 | 0 | } |
1089 | | |
1090 | | /** |
1091 | | * gst_audio_base_src_create_ringbuffer: |
1092 | | * @src: a #GstAudioBaseSrc. |
1093 | | * |
1094 | | * Create and return the #GstAudioRingBuffer for @src. This function will call |
1095 | | * the ::create_ringbuffer vmethod and will set @src as the parent of the |
1096 | | * returned buffer (see gst_object_set_parent()). |
1097 | | * |
1098 | | * Returns: (transfer none) (nullable): The new ringbuffer of @src. |
1099 | | */ |
1100 | | GstAudioRingBuffer * |
1101 | | gst_audio_base_src_create_ringbuffer (GstAudioBaseSrc * src) |
1102 | 0 | { |
1103 | 0 | GstAudioBaseSrcClass *bclass; |
1104 | 0 | GstAudioRingBuffer *buffer = NULL; |
1105 | |
1106 | 0 | bclass = GST_AUDIO_BASE_SRC_GET_CLASS (src); |
1107 | 0 | if (bclass->create_ringbuffer) |
1108 | 0 | buffer = bclass->create_ringbuffer (src); |
1109 | |
1110 | 0 | if (G_LIKELY (buffer)) |
1111 | 0 | gst_object_set_parent (GST_OBJECT_CAST (buffer), GST_OBJECT_CAST (src)); |
1112 | |
1113 | 0 | return buffer; |
1114 | 0 | } |
1115 | | |
1116 | | static GstStateChangeReturn |
1117 | | gst_audio_base_src_change_state (GstElement * element, |
1118 | | GstStateChange transition) |
1119 | 0 | { |
1120 | 0 | GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS; |
1121 | 0 | GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (element); |
1122 | |
1123 | 0 | switch (transition) { |
1124 | 0 | case GST_STATE_CHANGE_NULL_TO_READY:{ |
1125 | 0 | GstAudioRingBuffer *rb; |
1126 | |
1127 | 0 | GST_DEBUG_OBJECT (src, "NULL->READY"); |
1128 | 0 | gst_audio_clock_reset (GST_AUDIO_CLOCK (src->clock), 0); |
1129 | 0 | rb = gst_audio_base_src_create_ringbuffer (src); |
1130 | 0 | if (rb == NULL) |
1131 | 0 | goto create_failed; |
1132 | | |
1133 | 0 | GST_OBJECT_LOCK (src); |
1134 | 0 | src->ringbuffer = rb; |
1135 | 0 | GST_OBJECT_UNLOCK (src); |
1136 | |
1137 | 0 | if (!gst_audio_ring_buffer_open_device (src->ringbuffer)) { |
1138 | 0 | GST_OBJECT_LOCK (src); |
1139 | 0 | gst_object_unparent (GST_OBJECT_CAST (src->ringbuffer)); |
1140 | 0 | src->ringbuffer = NULL; |
1141 | 0 | GST_OBJECT_UNLOCK (src); |
1142 | 0 | goto open_failed; |
1143 | 0 | } |
1144 | 0 | break; |
1145 | 0 | } |
1146 | 0 | case GST_STATE_CHANGE_READY_TO_PAUSED: |
1147 | 0 | GST_DEBUG_OBJECT (src, "READY->PAUSED"); |
1148 | 0 | src->next_sample = -1; |
1149 | 0 | gst_audio_ring_buffer_set_flushing (src->ringbuffer, FALSE); |
1150 | 0 | gst_audio_ring_buffer_may_start (src->ringbuffer, FALSE); |
1151 | | /* Only post clock-provide messages if this is the clock that |
1152 | | * we've created. If the subclass has overridden it, the subclass |
1153 | | * should post these messages whenever necessary */ |
1154 | 0 | if (src->clock && GST_IS_AUDIO_CLOCK (src->clock) && |
1155 | 0 | GST_AUDIO_CLOCK_CAST (src->clock)->func == |
1156 | 0 | (GstAudioClockGetTimeFunc) gst_audio_base_src_get_time) |
1157 | 0 | gst_element_post_message (element, |
1158 | 0 | gst_message_new_clock_provide (GST_OBJECT_CAST (element), |
1159 | 0 | src->clock, TRUE)); |
1160 | 0 | break; |
1161 | 0 | case GST_STATE_CHANGE_PAUSED_TO_PLAYING: |
1162 | 0 | GST_DEBUG_OBJECT (src, "PAUSED->PLAYING"); |
1163 | 0 | gst_audio_ring_buffer_may_start (src->ringbuffer, TRUE); |
1164 | 0 | break; |
1165 | 0 | case GST_STATE_CHANGE_PLAYING_TO_PAUSED: |
1166 | 0 | GST_DEBUG_OBJECT (src, "PLAYING->PAUSED"); |
1167 | 0 | gst_audio_ring_buffer_may_start (src->ringbuffer, FALSE); |
1168 | 0 | gst_audio_ring_buffer_pause (src->ringbuffer); |
1169 | 0 | break; |
1170 | 0 | case GST_STATE_CHANGE_PAUSED_TO_READY: |
1171 | 0 | GST_DEBUG_OBJECT (src, "PAUSED->READY"); |
1172 | | /* Only post clock-lost messages if this is the clock that |
1173 | | * we've created. If the subclass has overridden it, the subclass |
1174 | | * should post these messages whenever necessary */ |
1175 | 0 | if (src->clock && GST_IS_AUDIO_CLOCK (src->clock) && |
1176 | 0 | GST_AUDIO_CLOCK_CAST (src->clock)->func == |
1177 | 0 | (GstAudioClockGetTimeFunc) gst_audio_base_src_get_time) |
1178 | 0 | gst_element_post_message (element, |
1179 | 0 | gst_message_new_clock_lost (GST_OBJECT_CAST (element), src->clock)); |
1180 | 0 | gst_audio_ring_buffer_set_flushing (src->ringbuffer, TRUE); |
1181 | 0 | break; |
1182 | 0 | default: |
1183 | 0 | break; |
1184 | 0 | } |
1185 | | |
1186 | 0 | ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition); |
1187 | |
1188 | 0 | switch (transition) { |
1189 | 0 | case GST_STATE_CHANGE_PAUSED_TO_READY: |
1190 | 0 | GST_DEBUG_OBJECT (src, "PAUSED->READY"); |
1191 | 0 | gst_audio_ring_buffer_release (src->ringbuffer); |
1192 | 0 | break; |
1193 | 0 | case GST_STATE_CHANGE_READY_TO_NULL: |
1194 | 0 | GST_DEBUG_OBJECT (src, "READY->NULL"); |
1195 | 0 | gst_audio_ring_buffer_close_device (src->ringbuffer); |
1196 | 0 | GST_OBJECT_LOCK (src); |
1197 | 0 | gst_object_unparent (GST_OBJECT_CAST (src->ringbuffer)); |
1198 | 0 | src->ringbuffer = NULL; |
1199 | 0 | GST_OBJECT_UNLOCK (src); |
1200 | 0 | break; |
1201 | 0 | default: |
1202 | 0 | break; |
1203 | 0 | } |
1204 | | |
1205 | 0 | return ret; |
1206 | | |
1207 | | /* ERRORS */ |
1208 | 0 | create_failed: |
1209 | 0 | { |
1210 | | /* subclass must post a meaningful error message */ |
1211 | 0 | GST_DEBUG_OBJECT (src, "create failed"); |
1212 | 0 | return GST_STATE_CHANGE_FAILURE; |
1213 | 0 | } |
1214 | 0 | open_failed: |
1215 | 0 | { |
1216 | | /* subclass must post a meaningful error message */ |
1217 | 0 | GST_DEBUG_OBJECT (src, "open failed"); |
1218 | 0 | return GST_STATE_CHANGE_FAILURE; |
1219 | 0 | } |
1220 | |
1221 | 0 | } |
1222 | | |
1223 | | static gboolean |
1224 | | gst_audio_base_src_post_message (GstElement * element, GstMessage * message) |
1225 | 0 | { |
1226 | 0 | GstAudioBaseSrc *src = GST_AUDIO_BASE_SRC (element); |
1227 | 0 | gboolean ret; |
1228 | |
1229 | 0 | if (GST_MESSAGE_TYPE (message) == GST_MESSAGE_ERROR && src->ringbuffer) { |
1230 | 0 | GstAudioRingBuffer *ringbuffer; |
1231 | |
1232 | 0 | GST_INFO_OBJECT (element, "subclass posted error"); |
1233 | |
1234 | 0 | ringbuffer = gst_object_ref (src->ringbuffer); |
1235 | | |
1236 | | /* post message first before signalling the error to the ringbuffer, to |
1237 | | * make sure it ends up on the bus before the generic basesrc internal |
1238 | | * flow error message */ |
1239 | 0 | ret = GST_ELEMENT_CLASS (parent_class)->post_message (element, message); |
1240 | |
1241 | 0 | gst_audio_ring_buffer_set_errored (ringbuffer); |
1242 | 0 | GST_AUDIO_RING_BUFFER_SIGNAL (ringbuffer); |
1243 | 0 | gst_object_unref (ringbuffer); |
1244 | 0 | } else { |
1245 | 0 | ret = GST_ELEMENT_CLASS (parent_class)->post_message (element, message); |
1246 | 0 | } |
1247 | 0 | return ret; |
1248 | 0 | } |