Coverage Report

Created: 2024-02-11 06:48

/src/gpac/src/compositor/audio_input.c
Line / Count / Source: every instrumented line in this file reports an execution count of 0, i.e. audio_input.c was not exercised at all by this run. The source is reproduced below with the per-line number and count columns removed for readability; two illustrative sketches of the drift-handling logic and of the exported API follow the listing.
/*
 *      GPAC - Multimedia Framework C SDK
 *
 *      Authors: Jean Le Feuvre
 *      Copyright (c) Telecom ParisTech 2000-2023
 *          All rights reserved
 *
 *  This file is part of GPAC / Scene Compositor sub-project
 *
 *  GPAC is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  GPAC is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <gpac/internal/compositor_dev.h>

#ifndef GPAC_DISABLE_COMPOSITOR

#define ENABLE_EARLY_FRAME_DETECTION

/*diff time in ms to consider an audio frame too late and drop it - we should try to dynamically figure this out
since the drift may be high on TS for example, where PTS-PCR>500ms is quite common*/
#define MAX_RESYNC_TIME   1000
//if the drift between audio object time and clock varies by more than this value (in ms) between two drift computations, the clock is adjusted. We don't adjust for lower values otherwise we would
//introduce oscillations in the clock and non-smooth playback
#define MIN_DRIFT_ADJUST  75


static u8 *gf_audio_input_fetch_frame(void *callback, u32 *size, u32 *planar_size, u32 audio_delay_ms)
{
  char *frame;
  u32 obj_time, ts;
  s32 drift;
  Fixed speed;
  Bool done;
  GF_AudioInput *ai = (GF_AudioInput *) callback;

restart:

  /*even if the stream is signaled as finished we must check it, because it may have been restarted by a mediaControl*/
  if (!ai->stream) return NULL;

  done = ai->stream_finished;
  ai->input_ifce.is_buffering = GF_FALSE;

  frame = gf_mo_fetch_data(ai->stream, ai->compositor->audio_renderer->non_rt_output ? GF_MO_FETCH_PAUSED : GF_MO_FETCH, 0, &ai->stream_finished, &ts, size, NULL, NULL, NULL, planar_size);
  /*invalidate scene on end of stream to refresh audio graph*/
  if (done != ai->stream_finished) {
    gf_sc_invalidate(ai->compositor, NULL);
  }
  ai->input_ifce.is_eos = ai->stream_finished;

  /*no more data or not enough data, reset sync drift*/
  if (!frame) {
    if (!ai->stream_finished && gf_mo_is_started(ai->stream) && (ai->stream->odm->ck->speed == FIX_ONE)) {
      GF_LOG(GF_LOG_DEBUG, GF_LOG_AUDIO, ("[Audio Input] No data in audio object\n"));
    }
    gf_mo_adjust_clock(ai->stream, 0);
    if (!ai->stream_finished && !ai->compositor->player)
      ai->input_ifce.is_buffering = GF_TRUE;
    else
      ai->input_ifce.is_buffering = gf_mo_is_buffering(ai->stream);
    *size = 0;
    if (!ai->stream->odm->state) ai->is_playing = GF_FALSE;
    return NULL;
  }
  if (ai->stream->config_changed) {
    gf_mo_release_data(ai->stream, 0, -1);
    *size = 0;
    return NULL;
  }
  ai->need_release = GF_TRUE;

  //step mode, return the frame without sync check
  if (ai->compositor->audio_renderer->non_rt_output) {
    GF_LOG(GF_LOG_DEBUG, GF_LOG_AUDIO, ("[Audio Input] audio frame CTS %u %d bytes fetched\n", ts, *size));
    return frame;
  }

  speed = gf_mo_get_current_speed(ai->stream);

  gf_mo_get_object_time(ai->stream, &obj_time);
  obj_time += audio_delay_ms;

  if (ai->compositor->audd<0) obj_time += -ai->compositor->audd;
  else if (obj_time > (u32) ai->compositor->audd) obj_time -= ai->compositor->audd;
  else obj_time=0;

  if (ai->compositor->bench_mode) {
    drift = 0;
  } else {
    drift = (s32)obj_time;
    drift -= (s32)ts;
  }
  if (ai->stream->odm->prev_clock_at_discontinuity_plus_one) {
    s32 drift_old = drift;
    s32 diff;
    drift_old -= (s32) ai->stream->odm->ck->init_timestamp;
    drift_old += (s32) ai->stream->odm->prev_clock_at_discontinuity_plus_one - 1;
    diff = ABS(drift_old);
    diff -= ABS(drift);
    if (diff < 0) {
      GF_LOG(GF_LOG_INFO, GF_LOG_COMPTIME, ("[Audio Input] in clock discontinuity: drift old clock %d new clock %d - disabling clock adjustment\n", drift_old, drift));
      drift = 0;
      audio_delay_ms = 0;
    } else {
      GF_LOG(GF_LOG_INFO, GF_LOG_COMPTIME, ("[Audio Input] end of clock discontinuity: drift old clock %d new clock %d\n", drift_old, drift));
      ai->stream->odm->prev_clock_at_discontinuity_plus_one = 0;
      if (drift<0) {
        drift = 0;
      }
    }
  }

  //flush audio (compositor in non-player mode is exiting), don't check drift
  if (ai->compositor->flush_audio) {
    drift = 0;
    audio_delay_ms = 0;
  }

#ifdef ENABLE_EARLY_FRAME_DETECTION
  /*too early (silence insertions), skip*/
  if (drift < 0) {
    //if not playing, start if audio is due in less than 50ms
    if (ai->is_playing || (drift < -50)) {
      GF_LOG(GF_LOG_INFO, GF_LOG_AUDIO, ("[Audio Input] audio too early of %d (CTS %u at OTB %u with audio delay %d ms)\n", drift + audio_delay_ms, ts, obj_time, audio_delay_ms));
      ai->need_release = GF_FALSE;
      gf_mo_release_data(ai->stream, 0, -1);
      *size = 0;
      return NULL;
    }
  }
#endif
  /*adjust drift*/
  if (audio_delay_ms) {
    s32 resync_delay = speed > 0 ? FIX2INT(speed * MAX_RESYNC_TIME) : FIX2INT(-speed * MAX_RESYNC_TIME);
    if (!ai->is_playing) resync_delay = 0;
    /*CU is way too late, discard and fetch a new one - this usually happens when media speed is more than 1*/
    if (drift>resync_delay) {
      GF_LOG(GF_LOG_INFO, GF_LOG_AUDIO, ("[Audio Input] Audio data too late obj time %d - CTS %d - drift %d ms - resync forced\n", obj_time - audio_delay_ms, ts, drift));
      gf_mo_release_data(ai->stream, *size, 2);
      ai->need_release = GF_FALSE;
      //avoid recursion
      goto restart;
    }
    if (ai->stream->odm && ai->stream->odm->ck)
      resync_delay = ai->stream->odm->ck->audio_delay - drift;
    else
      resync_delay = -drift;

    if (resync_delay < 0) resync_delay = -resync_delay;

    if (resync_delay > MIN_DRIFT_ADJUST) {
      GF_LOG(GF_LOG_DEBUG, GF_LOG_AUDIO, ("[Audio Input] Audio clock: delay %d - obj time %d - audio delay %d - CTS %d - adjust drift %d\n", audio_delay_ms, obj_time, audio_delay_ms, ts, drift));
      gf_mo_adjust_clock(ai->stream, drift);
    }
  }
  ai->is_playing = GF_TRUE;
  return frame;
}

static void gf_audio_input_release_frame(void *callback, u32 nb_bytes)
{
  GF_AudioInput *ai = (GF_AudioInput *) callback;
  if (!ai->stream) return;
  gf_mo_release_data(ai->stream, nb_bytes, 1);
  ai->need_release = GF_FALSE;
}

static Fixed gf_audio_input_get_speed(void *callback)
{
  GF_AudioInput *ai = (GF_AudioInput *) callback;
  return gf_mo_get_current_speed(ai->stream);
}

static Bool gf_audio_input_get_volume(void *callback, Fixed *vol)
{
  GF_AudioInput *ai = (GF_AudioInput *) callback;
  if (ai->snd && ai->snd->GetChannelVolume) {
    return ai->snd->GetChannelVolume(ai->snd->owner, vol);
  } else {
    u32 i;
    for (i=0; i<GF_AUDIO_MIXER_MAX_CHANNELS; i++)
      vol[i] = ai->intensity;

    return (ai->intensity==FIX_ONE) ? GF_FALSE : GF_TRUE;
  }
}

static Bool gf_audio_input_is_muted(void *callback)
{
  GF_AudioInput *ai = (GF_AudioInput *) callback;
  if (!ai->stream) return GF_TRUE;

  if (ai->stream->odm->nb_buffering)
    gf_odm_check_buffering(ai->stream->odm, NULL);
  if (ai->is_muted)
    return GF_TRUE;
  return gf_mo_is_muted(ai->stream);
}

static Bool gf_audio_input_get_config(GF_AudioInterface *aifc, Bool for_recf)
{
  GF_AudioInput *ai = (GF_AudioInput *) aifc->callback;
  if (!ai->stream) return GF_FALSE;
  /*watch out for object reuse*/
  if (aifc->samplerate && !ai->stream->config_changed) return GF_TRUE;

  gf_mo_get_audio_info(ai->stream, &aifc->samplerate, &aifc->afmt, &aifc->chan, &aifc->ch_layout, &aifc->forced_layout);

  if (!for_recf && !ai->stream->config_changed)
    return aifc->samplerate ? GF_TRUE : GF_FALSE;

  if (aifc->samplerate && aifc->chan && aifc->afmt && ((aifc->chan<=2) || aifc->ch_layout)) {
    if (ai->stream->config_changed) {
      ai->stream->config_changed = GF_FALSE;
      return GF_FALSE;
    }
    return GF_TRUE;
  }
  //still not ready !
  ai->stream->config_changed=GF_TRUE;
  return GF_FALSE;
}

GF_EXPORT
void gf_sc_audio_setup(GF_AudioInput *ai, GF_Compositor *compositor, GF_Node *node)
{
  memset(ai, 0, sizeof(GF_AudioInput));
  ai->owner = node;
  ai->compositor = compositor;
  ai->stream = NULL;
  /*setup io interface*/
  ai->input_ifce.FetchFrame = gf_audio_input_fetch_frame;
  ai->input_ifce.ReleaseFrame = gf_audio_input_release_frame;
  ai->input_ifce.GetConfig = gf_audio_input_get_config;
  ai->input_ifce.GetChannelVolume = gf_audio_input_get_volume;
  ai->input_ifce.GetSpeed = gf_audio_input_get_speed;
  ai->input_ifce.IsMuted = gf_audio_input_is_muted;
  ai->input_ifce.callback = ai;
  ai->intensity = FIX_ONE;

  ai->speed = FIX_ONE;

}

void gf_sc_audio_predestroy(GF_AudioInput *ai)
{
  gf_sc_audio_stop(ai);
  gf_sc_audio_unregister(ai);
}

GF_EXPORT
GF_Err gf_sc_audio_open(GF_AudioInput *ai, MFURL *url, Double clipBegin, Double clipEnd, Bool lock_timeline)
{
  if (ai->is_open) return GF_BAD_PARAM;

  /*get media object*/
  ai->stream = gf_mo_register(ai->owner, url, lock_timeline, GF_FALSE);
  /*bad URL*/
  if (!ai->stream) return GF_NOT_SUPPORTED;

  /*request play*/
  gf_mo_play(ai->stream, clipBegin, clipEnd, GF_FALSE);

  ai->stream_finished = GF_FALSE;
  ai->is_open = 1;
  //force reload of audio props
  ai->stream->config_changed = GF_TRUE;

  return GF_OK;
}

GF_EXPORT
void gf_sc_audio_stop(GF_AudioInput *ai)
{
  if (!ai->is_open) return;

  /*we must make sure the audio mixer is not using the stream, otherwise we may leave it dirty (with an unreleased frame)*/
  gf_mixer_lock(ai->compositor->audio_renderer->mixer, GF_TRUE);

  gf_assert(!ai->need_release);

  gf_mo_stop(&ai->stream);
  ai->is_open = 0;
  gf_mo_unregister(ai->owner, ai->stream);
  ai->stream = NULL;

  gf_mixer_lock(ai->compositor->audio_renderer->mixer, GF_FALSE);

}

GF_EXPORT
void gf_sc_audio_restart(GF_AudioInput *ai)
{
  if (!ai->is_open) return;
  if (ai->need_release) gf_mo_release_data(ai->stream, 0xFFFFFFFF, 2);
  ai->need_release = GF_FALSE;
  ai->stream_finished = GF_FALSE;

  gf_mo_restart(ai->stream);
}

GF_EXPORT
Bool gf_sc_audio_check_url(GF_AudioInput *ai, MFURL *url)
{
  if (!ai->stream) return url->count;
  return gf_mo_url_changed(ai->stream, url);
}

GF_EXPORT
void gf_sc_audio_register(GF_AudioInput *ai, GF_TraverseState *tr_state)
{
  GF_AudioInterface *aifce;
  /*check interface is valid*/
  if (!ai->input_ifce.FetchFrame
          || !ai->input_ifce.GetChannelVolume
          || !ai->input_ifce.GetConfig
          || !ai->input_ifce.GetSpeed
          || !ai->input_ifce.IsMuted
          || !ai->input_ifce.ReleaseFrame
     ) return;

  aifce = &ai->input_ifce;

  if (tr_state->audio_parent) {
    /*this assumes only one parent may use an audio node*/
    if (ai->register_with_parent) return;
    if (ai->register_with_renderer) {
      gf_sc_ar_remove_src(ai->compositor->audio_renderer, aifce);
      ai->register_with_renderer = GF_FALSE;
    }
    tr_state->audio_parent->add_source(tr_state->audio_parent, ai);
    ai->register_with_parent = GF_TRUE;
    ai->snd = tr_state->sound_holder;
  } else if (!ai->register_with_renderer) {

    if (ai->register_with_parent) {
      ai->register_with_parent = GF_FALSE;
      /*if used in a parent audio group, do a complete traverse to rebuild the group*/
      gf_sc_invalidate(ai->compositor, NULL);
    }

    gf_sc_ar_add_src(ai->compositor->audio_renderer, aifce);
    ai->register_with_renderer = GF_TRUE;
    ai->snd = tr_state->sound_holder;
  }
}

GF_EXPORT
void gf_sc_audio_unregister(GF_AudioInput *ai)
{
  GF_AudioInterface *aifce = &ai->input_ifce;

  if (ai->register_with_renderer) {
    ai->register_with_renderer = GF_FALSE;
    gf_sc_ar_remove_src(ai->compositor->audio_renderer, aifce);
  } else {
    /*if used in a parent audio group, do a complete traverse to rebuild the group*/
    gf_sc_invalidate(ai->compositor, NULL);
  }
}

#endif //GPAC_DISABLE_COMPOSITOR
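
Note on the drift handling: the scheduling decision in gf_audio_input_fetch_frame boils down to three outcomes driven by the MAX_RESYNC_TIME and MIN_DRIFT_ADJUST constants whose comments appear near the top of the file: skip a frame that is still too early, force a resync when it is later than MAX_RESYNC_TIME scaled by playback speed, and nudge the clock only when the residual mismatch exceeds MIN_DRIFT_ADJUST. The standalone sketch below restates that decision outside GPAC; decide_audio_action, the AudioAction enum and the parameter names are hypothetical illustration, not part of the library, and the real code only performs this check when the renderer reports a non-zero audio delay.

#include <stdlib.h>

#define MAX_RESYNC_TIME   1000  /* ms: beyond this the frame is discarded and a new one fetched */
#define MIN_DRIFT_ADJUST  75    /* ms: below this the clock is left alone to avoid oscillations */

/* hypothetical outcome enum, for illustration only */
typedef enum { AUDIO_PLAY, AUDIO_SKIP_TOO_EARLY, AUDIO_RESYNC, AUDIO_PLAY_ADJUST_CLOCK } AudioAction;

/* obj_time_ms: object clock time, already offset by the output audio delay
   cts_ms: composition timestamp of the fetched frame
   speed: playback speed (1 = normal, may be negative)
   clock_audio_delay_ms: audio delay currently stored on the media clock */
static AudioAction decide_audio_action(int obj_time_ms, int cts_ms, double speed, int clock_audio_delay_ms)
{
  int drift = obj_time_ms - cts_ms;  /* >0: frame is late, <0: frame is early */
  int resync_delay = (int)(MAX_RESYNC_TIME * (speed > 0 ? speed : -speed));

  /* early-frame detection: wait (GPAC also skips any early frame once playback has started) */
  if (drift < -50) return AUDIO_SKIP_TOO_EARLY;
  /* way too late: discard this frame and fetch the next one */
  if (drift > resync_delay) return AUDIO_RESYNC;

  /* adjust the clock only if the remaining mismatch is large enough to matter */
  if (abs(clock_audio_delay_ms - drift) > MIN_DRIFT_ADJUST) return AUDIO_PLAY_ADJUST_CLOCK;
  return AUDIO_PLAY;
}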
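
Note on the exported API: the gf_sc_audio_* functions above define the lifecycle a scene node's audio input goes through, with setup installing the GF_AudioInterface callbacks, open registering and playing the media object, and stop/unregister tearing it down. The sketch below only restates that call order under stated assumptions: play_audio_node is a hypothetical helper, the compositor, node, url and clip bounds are assumed to come from the caller's scene context, and actual registration with the renderer happens via gf_sc_audio_register during node traversal (not shown).

#include <gpac/internal/compositor_dev.h>

/* Illustrative sketch (not GPAC code): typical call order for the exported API in this file. */
static GF_Err play_audio_node(GF_Compositor *compositor, GF_Node *node, MFURL *url, Double clipBegin, Double clipEnd)
{
  GF_AudioInput ai;
  GF_Err e;

  gf_sc_audio_setup(&ai, compositor, node);  /* zero the struct and install the interface callbacks */

  e = gf_sc_audio_open(&ai, url, clipBegin, clipEnd, GF_FALSE);
  if (e != GF_OK) return e;                  /* already open or unsupported URL */

  /* ...the audio renderer pulls frames through ai.input_ifce once gf_sc_audio_register()
     has been called from the node's traversal... */

  gf_sc_audio_predestroy(&ai);               /* stop the stream and unregister the interface */
  return GF_OK;
}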