static void vinlet_free(t_vinlet *x) { canvas_rminlet(x->x_canvas, x->x_inlet); if (x->x_buf) t_freebytes(x->x_buf, x->x_bufsize * sizeof(*x->x_buf)); resample_free(&x->x_updown); }
/* transform_size vfunc: given a buffer size on one pad, compute the
 * corresponding buffer size on the opposite pad.  If the supplied caps are
 * exactly the ones _set_caps configured us with, the element's own resampler
 * state is consulted directly; otherwise a temporary state is built for the
 * computation and freed afterwards. */
static gboolean
audioresample_transform_size (GstBaseTransform * base,
    GstPadDirection direction, GstCaps * caps, guint size,
    GstCaps * othercaps, guint * othersize)
{
  GstAudioresample *audioresample = GST_AUDIORESAMPLE (base);
  ResampleState *rstate;
  GstCaps *srccaps;
  GstCaps *sinkcaps;
  gboolean borrowed = FALSE;    /* TRUE when rstate is the element's own */
  gboolean ret = TRUE;

  GST_LOG_OBJECT (base, "asked to transform size %d in direction %s",
      size, direction == GST_PAD_SINK ? "SINK" : "SRC");

  /* orient the caps: 'caps' describes the 'direction' pad, 'othercaps'
   * describes the opposite pad */
  sinkcaps = (direction == GST_PAD_SINK) ? caps : othercaps;
  srccaps = (direction == GST_PAD_SINK) ? othercaps : caps;

  if (gst_caps_is_equal (sinkcaps, audioresample->sinkcaps)
      && gst_caps_is_equal (srccaps, audioresample->srccaps)) {
    /* same caps that _set_caps saw: reuse our configured state */
    borrowed = TRUE;
    rstate = audioresample->resample;
  } else {
    GST_DEBUG_OBJECT (audioresample,
        "caps are not the set caps, creating state");
    rstate = resample_new ();
    resample_set_filter_length (rstate, audioresample->filter_length);
    resample_set_state_from_caps (rstate, sinkcaps, srccaps, NULL, NULL, NULL);
  }

  /* sink direction converts an input size to an output size, src direction
   * the reverse */
  *othersize = (direction == GST_PAD_SINK)
      ? resample_get_output_size_for_input (rstate, size)
      : resample_get_input_size_for_output (rstate, size);

  /* the computed size must be a whole number of samples */
  g_assert (*othersize % rstate->sample_size == 0);

  GST_LOG_OBJECT (base, "transformed size %d to %d", size, *othersize);

  if (!borrowed)
    resample_free (rstate);

  return ret;
}
/* stop vfunc: discard the resampler state and drop the cached negotiated
 * caps so a later start renegotiates from scratch. */
static gboolean
audioresample_stop (GstBaseTransform * base)
{
  GstAudioresample *audioresample = GST_AUDIORESAMPLE (base);

  if (audioresample->resample != NULL) {
    resample_free (audioresample->resample);
    audioresample->resample = NULL;
  }

  /* release our references to the caps recorded by _set_caps */
  gst_caps_replace (&audioresample->sinkcaps, NULL);
  gst_caps_replace (&audioresample->srccaps, NULL);

  return TRUE;
}
/* stop vfunc: discard the resampler state and drop the cached negotiated
 * caps so a later start renegotiates from scratch. */
static gboolean
legacyresample_stop (GstBaseTransform * base)
{
  GstLegacyresample *legacyresample = GST_LEGACYRESAMPLE (base);

  if (legacyresample->resample != NULL) {
    resample_free (legacyresample->resample);
    legacyresample->resample = NULL;
  }

  /* release our references to the caps recorded by _set_caps */
  gst_caps_replace (&legacyresample->sinkcaps, NULL);
  gst_caps_replace (&legacyresample->srccaps, NULL);

  return TRUE;
}
// Destructor: release the resampler context (only when compiled with
// resampling support) and the decode buffer.  buffer_mutex is held for the
// whole teardown so a concurrent consumer of the buffer cannot race us.
FeAudioImp::~FeAudioImp()
{
	sf::Lock l( buffer_mutex );

#ifdef DO_RESAMPLE
	if ( resample_ctx )
	{
		resample_free( &resample_ctx );
		resample_ctx = NULL;
	}
#endif

	if ( buffer )
	{
		av_free( buffer );
		buffer = NULL;
	}
}
// SFML SoundStream callback: decode audio packets until roughly one second's
// worth of samples (sample_rate loop bound) has been written into
// m_audio->buffer, then hand that buffer to SFML via 'data'.
// Returns false (end of stream) when there is no audio stream, the file is
// exhausted with nothing buffered, or a decode/convert setup error occurs.
// The body is compiled against one of several libavcodec API generations,
// selected by LIBAVCODEC_VERSION_INT.
bool FeMedia::onGetData( Chunk &data )
{
	// offset counts sf::Int16 samples already written into m_audio->buffer
	int offset=0;

	data.samples = NULL;
	data.sampleCount = 0;

	if ( (!m_audio) || end_of_file() )
		return false;

	// NOTE(review): loop bound compares a sample count against the sample
	// rate, so with multi-channel audio this buffers ~1s divided by the
	// channel count — presumably intentional; confirm against buffer size.
	while ( offset < m_audio->codec_ctx->sample_rate )
	{
		// Pull the next queued packet, demuxing more of the file as needed.
		AVPacket *packet = m_audio->pop_packet();
		while (( packet == NULL ) && ( !end_of_file() ))
		{
			read_packet();
			packet = m_audio->pop_packet();
		}

		if ( packet == NULL )
		{
			// No more packets: mark the stream finished.  Return true if we
			// already buffered something this call, false otherwise.
			m_audio->at_end=true;
			if ( offset > 0 )
				return true;
			return false;
		}

#if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT( 53, 25, 0 ))
		// Legacy decode path: avcodec_decode_audio3 writes interleaved
		// samples directly into our buffer at the current offset.
		{
			sf::Lock l( m_audio->buffer_mutex );

			int bsize = MAX_AUDIO_FRAME_SIZE;
			if ( avcodec_decode_audio3( m_audio->codec_ctx, (m_audio->buffer + offset), &bsize, packet) < 0 )
			{
				std::cerr << "Error decoding audio." << std::endl;
				FeBaseStream::free_packet( packet );
				return false;
			}
			else
			{
				// bsize is in bytes; convert to sf::Int16 sample counts.
				offset += bsize / sizeof( sf::Int16 );
				data.sampleCount += bsize / sizeof(sf::Int16);
				data.samples = m_audio->buffer;
			}
		}
#else
#if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 55, 45, 0 ))
		AVFrame *frame = av_frame_alloc();
		// Ask libavcodec for refcounted frames so free_frame can unref them.
		m_audio->codec_ctx->refcounted_frames = 1;
#else
		AVFrame *frame = avcodec_alloc_frame();
#endif
		//
		// TODO: avcodec_decode_audio4() can return multiple frames per packet depending on the codec.
		// We don't deal with this appropriately...
		//
		int got_frame( 0 );
		int len = avcodec_decode_audio4( m_audio->codec_ctx, frame, &got_frame, packet );
		if ( len < 0 )
		{
			// Decode errors are logged (debug builds only) but not fatal:
			// we simply fall through and try the next packet.
#ifdef FE_DEBUG
			char buff[256];
			av_strerror( len, buff, 256 );
			std::cerr << "Error decoding audio: " << buff << std::endl;
#endif
		}

		if ( got_frame )
		{
			// Size in bytes of the decoded frame in its native sample format.
			int data_size = av_samples_get_buffer_size(
				NULL,
				m_audio->codec_ctx->channels,
				frame->nb_samples,
				m_audio->codec_ctx->sample_fmt, 1);

#ifdef DO_RESAMPLE
			// S16 frames can be copied straight through; anything else goes
			// through the format converter in the else branch below.
			if ( m_audio->codec_ctx->sample_fmt == AV_SAMPLE_FMT_S16 )
#endif
			{
				sf::Lock l( m_audio->buffer_mutex );

				// NOTE(review): copies only data[0]; for planar formats this
				// would be one plane — safe here since this path is S16
				// (interleaved) when DO_RESAMPLE is defined.
				memcpy( (m_audio->buffer + offset), frame->data[0], data_size );
				offset += data_size / sizeof( sf::Int16 );
				data.sampleCount += data_size / sizeof(sf::Int16);
				data.samples = m_audio->buffer;
			}
#ifdef DO_RESAMPLE
			else
			{
				sf::Lock l( m_audio->buffer_mutex );
				// Lazily create the converter the first time a non-S16 frame
				// shows up, configured to convert to interleaved S16 at the
				// frame's own sample rate (no rate change).
				if ( !m_audio->resample_ctx )
				{
					m_audio->resample_ctx = resample_alloc();
					if ( !m_audio->resample_ctx )
					{
						std::cerr << "Error allocating audio format converter." << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						return false;
					}

					// Some decoders leave channel_layout unset; derive a
					// default from the channel count in that case.
					int64_t channel_layout = frame->channel_layout;
					if ( !channel_layout )
					{
						channel_layout = av_get_default_channel_layout( m_audio->codec_ctx->channels );
					}

					av_opt_set_int( m_audio->resample_ctx, "in_channel_layout", channel_layout, 0 );
					av_opt_set_int( m_audio->resample_ctx, "in_sample_fmt", frame->format, 0 );
					av_opt_set_int( m_audio->resample_ctx, "in_sample_rate", frame->sample_rate, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_channel_layout", channel_layout, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_sample_rate", frame->sample_rate, 0 );

#ifdef FE_DEBUG
					std::cout << "Initializing resampler: in_sample_fmt="
						<< av_get_sample_fmt_name( (AVSampleFormat)frame->format )
						<< ", in_sample_rate=" << frame->sample_rate
						<< ", out_sample_fmt=" << av_get_sample_fmt_name( AV_SAMPLE_FMT_S16 )
						<< ", out_sample_rate=" << frame->sample_rate << std::endl;
#endif
					if ( resample_init( m_audio->resample_ctx ) < 0 )
					{
						std::cerr << "Error initializing audio format converter, input format="
							<< av_get_sample_fmt_name( (AVSampleFormat)frame->format )
							<< ", input sample rate=" << frame->sample_rate << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						resample_free( &m_audio->resample_ctx );
						m_audio->resample_ctx = NULL;
						return false;
					}
				}

				if ( m_audio->resample_ctx )
				{
					int out_linesize;
					av_samples_get_buffer_size(
						&out_linesize,
						m_audio->codec_ctx->channels,
						frame->nb_samples,
						AV_SAMPLE_FMT_S16, 0 );

					// Converted S16 output lands directly in our buffer at
					// the current write offset.
					uint8_t *tmp_ptr = (uint8_t *)(m_audio->buffer + offset);

					// Either libswresample or libavresample provides the
					// converter, selected at build time.
#ifdef USE_SWRESAMPLE
					int out_samples = swr_convert(
								m_audio->resample_ctx,
								&tmp_ptr,
								frame->nb_samples,
								(const uint8_t **)frame->data,
								frame->nb_samples );
#else // USE_AVRESAMPLE
					int out_samples = avresample_convert(
								m_audio->resample_ctx,
								&tmp_ptr,
								out_linesize,
								frame->nb_samples,
								frame->data,
								frame->linesize[0],
								frame->nb_samples );
#endif
					if ( out_samples < 0 )
					{
						// Conversion failure: abandon this call but keep what
						// was buffered so far ('break', not 'return false').
						std::cerr << "Error performing audio conversion." << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						break;
					}
					// out_samples counts per-channel samples; scale by the
					// channel count to get interleaved sf::Int16 samples.
					offset += out_samples * m_audio->codec_ctx->channels;
					data.sampleCount += out_samples * m_audio->codec_ctx->channels;
					data.samples = m_audio->buffer;
				}
			}
#endif
		}
		FeBaseStream::free_frame( frame );
#endif
		FeBaseStream::free_packet( packet );
	}

	return true;
}
static void vinlet_free(t_vinlet *x) { canvas_rminlet(x->x_canvas, x->x_inlet); resample_free(&x->x_updown); }
static void voutlet_free(t_voutlet *x) { canvas_rmoutlet(x->x_canvas, x->x_parentoutlet); resample_free(&x->x_updown); }