Example #1
/* Feed pending input to Rubber Band until output is available, then hand
 * one output frame to the filter chain and update the reported delay. */
static int filter_out(struct af_instance *af)
{
    struct priv *p = af->priv;

    // Feed pending input until the stretcher has at least one output sample.
    while (rubberband_available(p->rubber) <= 0) {
        const float *dummy[MP_NUM_CHANNELS] = {0};
        const float **in_data = dummy;
        size_t in_samples = 0;
        if (p->pending) {
            if (!p->pending->samples)
                break;

            // recover from previous EOF
            if (p->needs_reset) {
                rubberband_reset(p->rubber);
                p->rubber_delay = 0;
            }
            p->needs_reset = false;

            size_t needs = rubberband_get_samples_required(p->rubber);
            in_data = (void *)&p->pending->planes;
            in_samples = MPMIN(p->pending->samples, needs);
        }

        if (p->needs_reset)
            break; // previous EOF
        p->needs_reset = !p->pending; // EOF

        rubberband_process(p->rubber, in_data, in_samples, p->needs_reset);
        p->rubber_delay += in_samples;

        if (!p->pending)
            break;
        mp_audio_skip_samples(p->pending, in_samples);
    }

    // Hand whatever output the stretcher produced to the filter chain.
    int out_samples = rubberband_available(p->rubber);
    if (out_samples > 0) {
        struct mp_audio *out =
            mp_audio_pool_get(af->out_pool, af->data, out_samples);
        if (!out)
            return -1;
        if (p->pending)
            mp_audio_copy_config(out, p->pending);

        float **out_data = (void *)&out->planes;
        out->samples = rubberband_retrieve(p->rubber, out_data, out->samples);
        p->rubber_delay -= out->samples * p->speed;

        af_add_output_frame(af, out);
    }

    // Report latency: samples still inside Rubber Band plus pending input.
    int delay_samples = p->rubber_delay;
    if (p->pending)
        delay_samples += p->pending->samples;
    af->delay = delay_samples / (af->data->rate * p->speed);

    return 0;
}
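
Stripped of mpv's filter plumbing, the feed/drain pattern that filter_out wraps can be sketched on its own. The rubberband_* calls below are the library's real C API (rubberband-c.h); the stretch_block helper, its stereo-only layout, and its buffer arguments are illustrative assumptions, not mpv code.

#include <rubberband/rubberband-c.h>

// Feed one stereo block to the stretcher and drain whatever output is
// ready. Returns the number of frames written to out[0]/out[1].
// A suitable state would come from e.g.
//   rubberband_new(rate, 2, RubberBandOptionProcessRealTime, 1.0 / speed, 1.0);
// (as in the mpv filter, a playback speed of 2.0 means a time ratio of 0.5).
static unsigned int stretch_block(RubberBandState rb,
                                  const float *const in[2], unsigned int nframes,
                                  float *const out[2], unsigned int capacity,
                                  int final)
{
    unsigned int fed = 0;
    while (fed < nframes) {  // assumes nframes > 0
        // Cap each process call at what the stretcher asks for, as the
        // mpv filter does with MPMIN(pending->samples, needs).
        unsigned int n = nframes - fed;
        unsigned int want = rubberband_get_samples_required(rb);
        if (want > 0 && n > want)
            n = want;
        const float *const chunk[2] = { in[0] + fed, in[1] + fed };
        rubberband_process(rb, chunk, n, final && fed + n == nframes);
        fed += n;
    }

    // Drain whatever is ready, up to the caller's capacity.
    int avail = rubberband_available(rb);
    if (avail <= 0)
        return 0;
    unsigned int got = (unsigned int)avail;
    if (got > capacity)
        got = capacity;
    return rubberband_retrieve(rb, out, got);
}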
Example #2
static int
stream_callback (const void *input_buffer, void *output_buffer,
                 unsigned long frames_per_buffer,
                 const PaStreamCallbackTimeInfo *time_info,
                 PaStreamCallbackFlags status_flags, void *user_data)
{
  float **buffers = (float **) output_buffer;
#ifdef _HAVE_RUBBERBAND_
  /* On the first callback, tell Rubber Band the largest block it will
     ever be asked to process. */
  static gboolean initialized = FALSE;
  if (!initialized)
    {
      rubberband_set_max_process_size (rubberband, frames_per_buffer);
      initialized = TRUE;
    }
#endif

  size_t i;
  for (i = 0; i < 2; ++i)
    {
      memset (buffers[i], 0, frames_per_buffer * sizeof (float));
    }

  if (!ready)
    return paContinue;

#ifdef _HAVE_FLUIDSYNTH_
  if (reset_audio)
    {
      fluidsynth_all_notes_off ();
      reset_synth_channels ();
      reset_audio = FALSE;
      return paContinue;
    }

  unsigned char event_data[MAX_MESSAGE_LENGTH]; //needs to be long enough for variable length messages...
  size_t event_length = MAX_MESSAGE_LENGTH;
  double event_time;

  double until_time = nframes_to_seconds (playback_frame + frames_per_buffer);
#ifdef _HAVE_RUBBERBAND_
  gint available = rubberband_available (rubberband);
  /* Feed the synth only when Rubber Band does not already have a full
     buffer of stretched audio ready. */
  if ((!rubberband_active) || (available < (gint) frames_per_buffer)) {
#endif

  while (read_event_from_queue (AUDIO_BACKEND, event_data, &event_length, &event_time, until_time / slowdown))
    {
      //g_print ("%x %x %x\n", event_data[0], event_data[1], event_data[2]);
      fluidsynth_feed_midi (event_data, event_length);  // in fluid.c; note that the fluidsynth API itself uses fluid_synth_xxx, a confusingly similar naming convention
    }

  fluidsynth_render_audio (frames_per_buffer, buffers[0], buffers[1]);  // in fluid.c; calls fluid_synth_write_float()

  // Now mix in any queued audio - dumped into the second channel for now
  event_length = frames_per_buffer;
  read_event_from_mixer_queue (AUDIO_BACKEND, (void *) buffers[1], &event_length);

#ifdef _HAVE_RUBBERBAND_
  }
  // If enough stretched audio is already available, use it; otherwise hand buffers[] to Rubber Band to process.
  if (rubberband_active)
    {
      if (available < (gint) frames_per_buffer)
        rubberband_process (rubberband, (const float *const *) buffers, frames_per_buffer, 0);
      available = rubberband_available (rubberband);
      if (available >= (gint) frames_per_buffer)
        {
          rubberband_retrieve (rubberband, buffers, frames_per_buffer);  // re-use buffers[] as output now that their input has been consumed
          write_samples_to_rubberband_queue (AUDIO_BACKEND, buffers[0], frames_per_buffer);
          write_samples_to_rubberband_queue (AUDIO_BACKEND, buffers[1], frames_per_buffer);
          available -= frames_per_buffer;
        }
      event_length = frames_per_buffer;
      read_event_from_rubberband_queue (AUDIO_BACKEND, (unsigned char *) buffers[0], &event_length);
      event_length = frames_per_buffer;
      read_event_from_rubberband_queue (AUDIO_BACKEND, (unsigned char *) buffers[1], &event_length);
    }
#endif //_HAVE_RUBBERBAND_

  if (until_time < get_playuntil ())
    {
#endif //_HAVE_FLUIDSYNTH_
      playback_frame += frames_per_buffer;
      update_playback_time (TIMEBASE_PRIO_AUDIO, nframes_to_seconds (playback_frame));
#ifdef _HAVE_FLUIDSYNTH_
    }
#endif //_HAVE_FLUIDSYNTH_

  // Doing heavy work like this in an audio callback is probably a bad idea
  record_audio(buffers, frames_per_buffer);
  return paContinue;
}
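
Isolated from the Denemo and PortAudio plumbing, the Rubber Band gating in this callback reduces to the sketch below. The rubberband_* calls are the real C API; pull_stretched_block and the stereo layout are illustrative assumptions.

#include <string.h>
#include <rubberband/rubberband-c.h>

// Feed one fixed-size stereo block, and emit a block only once the
// stretcher has accumulated a full one; emit silence otherwise, so the
// caller always gets exactly nframes of output.
static void pull_stretched_block(RubberBandState rb,
                                 const float *const in[2],
                                 float *const out[2],
                                 unsigned int nframes)
{
    if (rubberband_available(rb) < (int)nframes)
        rubberband_process(rb, in, nframes, 0);

    if (rubberband_available(rb) >= (int)nframes) {
        rubberband_retrieve(rb, out, nframes);
    } else {
        memset(out[0], 0, nframes * sizeof(float));
        memset(out[1], 0, nframes * sizeof(float));
    }
}

Retrieving only whole frames_per_buffer blocks keeps the callback's output size constant, at the cost of up to one buffer of extra latency while the stretcher fills.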
Example #3
int alogg_poll_ogg_ts(ALOGG_OGG *ogg) {
  void *audiobuf;
  char *audiobuf_p;
  unsigned short *audiobuf_sp;
  int i, size_done, finished = 0;

  /* continue only if we are playing it */
  if (!alogg_is_playing_ogg(ogg))
    return ALOGG_POLL_NOTPLAYING;

  /* get the audio stream buffer and only continue if we need to fill it */
  audiobuf = get_audio_stream_buffer(ogg->audiostream);
  if (audiobuf == NULL)
    return ALOGG_OK;

  /* clear the buffer with 16bit unsigned data */
  {
    int i;
    unsigned short *j = (unsigned short *)audiobuf;
    for (i = 0; i < (ogg->audiostream_buffer_len / 2); i++, j++)
      *j = 0x8000;
  }

  /* if we need to fill it, but we were just waiting for it to finish */
  if (!ogg->loop) {
    if (ogg->wait_for_audio_stop > 0) {
      free_audio_stream_buffer(ogg->audiostream);
      if (--ogg->wait_for_audio_stop == 0) {
        /* stop it */
        alogg_stop_ogg(ogg);
        return ALOGG_POLL_PLAYJUSTFINISHED;
      }
      else
        return ALOGG_OK;
    }
  }

  audiobuf_sp = (unsigned short *)audiobuf;
  while (!finished && rubberband_available(ogg->time_stretch_state) < ogg->time_stretch_buffer_samples) {
    /* reset these each iteration so we don't overrun the buffer */
    audiobuf_p = (char *)audiobuf;
    size_done = 0;

    /* read samples from Ogg Vorbis file */
    for (i = ogg->audiostream_buffer_len; i > 0; i -= size_done) {
      /* decode */
      size_done = ov_read(&(ogg->vf), audiobuf_p, i, alogg_endianess, 2, 0, &(ogg->current_section));

      /* check if the decoding was not successful */
      if (size_done < 0) {
        if (size_done == OV_HOLE)
          size_done = 0;
        else {
          free_audio_stream_buffer(ogg->audiostream);
          alogg_stop_ogg(ogg);
          alogg_rewind_ogg(ogg);
          return ALOGG_POLL_FRAMECORRUPT;
        }
      }
      else if (size_done == 0) {
        alogg_rewind_ogg(ogg);
        ogg->wait_for_audio_stop = 2;
        finished = 1;
        break; // playback finished so get out of loop
      }
      audiobuf_p += size_done;
    }

    /* process samples with Rubber Band */
    if (ogg->stereo) {
      for (i = 0; i < ogg->time_stretch_buffer_samples; i++) {
        ogg->time_stretch_buffer[0][i] = (float)((long)audiobuf_sp[i * 2] - 0x8000) / (float)0x8000;      // convert sample to signed floating point format
        ogg->time_stretch_buffer[1][i] = (float)((long)audiobuf_sp[i * 2 + 1] - 0x8000) / (float)0x8000;  // repeat for the other channel's sample
      }
    }
    else {
      for (i = 0; i < ogg->time_stretch_buffer_samples; i++) {
        ogg->time_stretch_buffer[0][i] = (float)((long)audiobuf_sp[i] - 0x8000) / (float)0x8000;          // convert sample to signed floating point format
      }
    }
    rubberband_process(ogg->time_stretch_state, (const float **)ogg->time_stretch_buffer, ogg->time_stretch_buffer_samples, 0);
  }

  /* retrieve audio from rubberband and put it into stream buffer */
  size_done = rubberband_retrieve(ogg->time_stretch_state, ogg->time_stretch_buffer, ogg->time_stretch_buffer_samples);
  if (ogg->stereo) {
    for (i = 0; i < size_done; i++) {
      /* Convert back to unsigned 16 bit, clamping at both rails. Note the
         test must be >= 1.0: a sample of exactly 1.0 would otherwise give
         0x8000 + 0x8000 = 0x10000, wrapping the unsigned short to 0. */
      if (ogg->time_stretch_buffer[0][i] >= 1.0)
        audiobuf_sp[i * 2] = 0xFFFF;
      else if (ogg->time_stretch_buffer[0][i] < -1.0)
        audiobuf_sp[i * 2] = 0;
      else
        audiobuf_sp[i * 2] = (ogg->time_stretch_buffer[0][i] * (float)0x8000) + (float)0x8000;
      /* repeat for the other channel's sample */
      if (ogg->time_stretch_buffer[1][i] >= 1.0)
        audiobuf_sp[i * 2 + 1] = 0xFFFF;
      else if (ogg->time_stretch_buffer[1][i] < -1.0)
        audiobuf_sp[i * 2 + 1] = 0;
      else
        audiobuf_sp[i * 2 + 1] = (ogg->time_stretch_buffer[1][i] * (float)0x8000) + (float)0x8000;
    }
  }
  else {
    for (i = 0; i < size_done; i++) {
      if (ogg->time_stretch_buffer[0][i] >= 1.0)
        audiobuf_sp[i] = 0xFFFF;
      else if (ogg->time_stretch_buffer[0][i] < -1.0)
        audiobuf_sp[i] = 0;
      else
        audiobuf_sp[i] = (ogg->time_stretch_buffer[0][i] * (float)0x8000) + (float)0x8000;
    }
  }

  /* lock the buffer */
  if(alogg_buffer_callback)
  {
     alogg_buffer_callback(audiobuf, ogg->audiostream_buffer_len);
  }
  free_audio_stream_buffer(ogg->audiostream);
  return ALOGG_OK;
}
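
The inline format conversions in this example are easy to get wrong at the rails (hence the >= 1.0 clamp above). Factored into helpers under the same 0x8000-biased unsigned 16-bit convention, they might look like this; both function names are illustrative, not part of alogg:

/* Unsigned 16-bit sample (silence at 0x8000) to signed float in [-1.0, 1.0). */
static float u16_to_float(unsigned short s)
{
  return (float)((long)s - 0x8000) / (float)0x8000;
}

/* Signed float back to unsigned 16-bit, clamped in integer space so that
   values at or beyond +/-1.0 cannot wrap around the unsigned short range. */
static unsigned short float_to_u16(float f)
{
  long v = (long)(f * (float)0x8000) + 0x8000;
  if (v < 0)
    return 0;
  if (v > 0xFFFF)
    return 0xFFFF;
  return (unsigned short)v;
}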