/*
 * Parse the aresample filter arguments — a ':'-separated list of
 * "name=value" swr options, where a bare token is taken as the output
 * sample rate — and store them on a freshly allocated SwrContext.
 * Returns 0 on success or a negative AVERROR code.
 */
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
{
    AResampleContext *aresample = ctx->priv;
    int ret = 0;
    char *argd = av_strdup(args);

    aresample->next_pts = AV_NOPTS_VALUE;
    aresample->swr = swr_alloc();
    if (!aresample->swr) {
        /* FIX: the original returned here directly, leaking argd. */
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (args) {
        char *ptr = argd, *token;

        /* FIX: av_strdup() can fail; the original walked argd unchecked. */
        if (!argd) {
            ret = AVERROR(ENOMEM);
            goto end;
        }

        while ((token = av_strtok(ptr, ":", &ptr))) {
            char *value;
            av_strtok(token, "=", &value);

            if (value) {
                /* "name=value" pair: forward to the resampler's options. */
                if ((ret = av_opt_set(aresample->swr, token, value, 0)) < 0)
                    goto end;
            } else {
                /* Bare token: it is the output sample rate. */
                int out_rate;

                if ((ret = ff_parse_sample_rate(&out_rate, token, ctx)) < 0)
                    goto end;
                if ((ret = av_opt_set_int(aresample->swr, "osr", out_rate, 0)) < 0)
                    goto end;
            }
        }
    }
end:
    av_free(argd);
    return ret;
}
static int output_ffmpeg_init(void) { Log_error("ffmpeg", "output_ffmpeg_init----- "); SongMetaData_init(&song_meta_); register_mime_type("audio/*"); register_mime_type("audio/x-mpeg"); register_mime_type("audio/mpeg"); av_register_all(); avformat_network_init(); //:(s_pFormatCtx = avformat_alloc_context(); s_au_convert_ctx = swr_alloc(); s_out_buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2); s_pFrame = av_frame_alloc(); mp_msg_init(); char ** ao_list= malloc(sizeof(char*)*2); const int c_number_count = 10; *ao_list = malloc(sizeof(char) * c_number_count); ao_list[1] = malloc(sizeof(char) *c_number_count); strcpy(ao_list[0],"alsa"); memset(ao_list[1],0, c_number_count); s_audio_device = init_best_audio_out(ao_list, 0, 44100, 2,AF_FORMAT_S16_LE,0); assert(s_audio_device != NULL); free(ao_list[0]); free(ao_list[1]); free(ao_list); mixer_Init_control_point(&s_mixer, s_audio_device); pthread_mutex_init(&s_mutex, NULL); pthread_cond_init(&s_cond, NULL); return 0; }
// Configure the software resampler that converts decoded audio to packed
// stereo S16 at the media's sample rate, and allocate the output buffer.
// Returns Audio_Render_Err_None on success; Audio_Render_Err_InitFail or
// EC_Err_Memory_Low on failure (nothing is leaked on the error paths).
EC_U32 AudioWaveScale::Init(MediaCtxInfo* pMediaInfo, AudioPCMBuffer *pFirstFrame)
{
    if (EC_NULL == pMediaInfo)
        return Audio_Render_Err_InitFail;

    AVCodecContext *pCodecCtx = (AVCodecContext*)(pMediaInfo->m_pAudioCodecInfo);
    if (EC_NULL == pCodecCtx)                 // FIX: was dereferenced unchecked
        return Audio_Render_Err_InitFail;

    // Output side: stereo, packed signed 16-bit, same rate as the media.
    EC_S32 out_sample_rate = pMediaInfo->m_nSampleRate;
    EC_S64 out_channel_layout = AV_CH_LAYOUT_STEREO;
    EC_S32 out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
    AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;

    // Input side: whatever the decoder produces.
    EC_S64 in_channel_layout = av_get_default_channel_layout(pCodecCtx->channels);
    EC_S32 in_sample_rate = pCodecCtx->sample_rate;
    AVSampleFormat in_sample_fmt = pCodecCtx->sample_fmt;

    m_nOutChannels = out_channels;
    m_nOutSampleFormat = out_sample_fmt;

    // swr_alloc_set_opts() allocates when passed NULL, so the separate
    // swr_alloc() call in the original was redundant.
    m_pWaveScaleContext = swr_alloc_set_opts(NULL,
                                             out_channel_layout, out_sample_fmt, out_sample_rate,
                                             in_channel_layout, in_sample_fmt, in_sample_rate,
                                             0, NULL);
    if (EC_NULL == m_pWaveScaleContext)
        return Audio_Render_Err_InitFail;

    EC_S32 nRet = swr_init(m_pWaveScaleContext);
    if (nRet < 0) {
        swr_free(&m_pWaveScaleContext);       // FIX: context leaked on failure
        return Audio_Render_Err_InitFail;
    }

    m_pScaleOutbuffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
    if (m_pScaleOutbuffer == EC_NULL) {
        swr_free(&m_pWaveScaleContext);       // FIX: context leaked on failure
        return EC_Err_Memory_Low;
    }
    return Audio_Render_Err_None;
}
struct SwrContext * av_swr_alloc(int in_ch,int in_rate,enum AVSampleFormat in_fmt, int out_ch,int out_rate,enum AVSampleFormat out_fmt) { int ret; struct SwrContext * swr = swr_alloc(); if (!swr) { av_log(NULL, AV_LOG_FATAL, "Could not allocate resampler context.\n"); return NULL; } /* set options */ av_opt_set_int(swr, "in_channel_count", in_ch, 0); av_opt_set_int(swr, "in_sample_rate", in_rate, 0); av_opt_set_sample_fmt(swr, "in_sample_fmt", in_fmt, 0); av_opt_set_int(swr, "out_channel_count", out_ch, 0); av_opt_set_int(swr, "out_sample_rate", out_rate, 0); av_opt_set_sample_fmt(swr, "out_sample_fmt", out_fmt, 0); /* initialize the resampling context */ if ((ret = swr_init(swr)) < 0) { av_log(NULL, AV_LOG_FATAL, "Failed to initialize the resampling context\n"); return NULL; } return swr; }
/* Initialization and runtime control for the resample filter.
 * cmd selects the operation; the type behind arg depends on cmd.
 * Returns AF_OK / AF_DETACH / AF_ERROR / AF_UNKNOWN. */
static int control(struct af_instance_s* af, int cmd, void* arg)
{
  af_resample_t* s = (af_resample_t*)af->setup;
  af_data_t *data = (af_data_t*)arg;
  int out_rate, test_output_res; // helpers for checking input format

  switch(cmd){
  case AF_CONTROL_REINIT:
    /* Nothing to resample when the rates already match or no target
     * rate has been configured yet. */
    if((af->data->rate == data->rate) || (af->data->rate == 0))
      return AF_DETACH;

    af->data->nch = data->nch;
    if (af->data->nch > AF_NCH) af->data->nch = AF_NCH;
    af->data->format = AF_FORMAT_S16_NE;  /* resampler works on native-endian S16 */
    af->data->bps = 2;
    af->mul = (double)af->data->rate / data->rate;
    af->delay = af->data->nch * s->filter_length / FFMIN(af->mul, 1); // *bps*.5

    /* Rebuild the swr context only when one of the parameters it was
     * built with has changed since the last REINIT. */
    if (s->ctx_out_rate != af->data->rate || s->ctx_in_rate != data->rate ||
        s->ctx_filter_size != s->filter_length ||
        s->ctx_phase_shift != s->phase_shift ||
        s->ctx_linear != s->linear || s->ctx_cutoff != s->cutoff) {
      swr_free(&s->swrctx);
      if((s->swrctx = swr_alloc()) == NULL) return AF_ERROR;
      av_opt_set_int(s->swrctx, "out_sample_rate", af->data->rate, 0);
      av_opt_set_int(s->swrctx, "in_sample_rate", data->rate, 0);
      av_opt_set_int(s->swrctx, "filter_size", s->filter_length, 0);
      av_opt_set_int(s->swrctx, "phase_shift", s->phase_shift, 0);
      av_opt_set_int(s->swrctx, "linear_interp", s->linear, 0);
      av_opt_set_double(s->swrctx, "cutoff", s->cutoff, 0);
      av_opt_set_sample_fmt(s->swrctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
      av_opt_set_sample_fmt(s->swrctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
      av_opt_set_int(s->swrctx, "in_channel_count", af->data->nch, 0);
      av_opt_set_int(s->swrctx, "out_channel_count", af->data->nch, 0);
      if(swr_init(s->swrctx) < 0) return AF_ERROR;
      /* Remember the parameters the context was built with so the next
       * REINIT can skip the rebuild when nothing changed. */
      s->ctx_out_rate = af->data->rate;
      s->ctx_in_rate = data->rate;
      s->ctx_filter_size = s->filter_length;
      s->ctx_phase_shift = s->phase_shift;
      s->ctx_linear = s->linear;
      s->ctx_cutoff = s->cutoff;
    }

    // hack to make af_test_output ignore the samplerate change
    out_rate = af->data->rate;
    af->data->rate = data->rate;
    test_output_res = af_test_output(af, (af_data_t*)arg);
    af->data->rate = out_rate;
    return test_output_res;

  case AF_CONTROL_COMMAND_LINE:{
    /* Parse "rate:filter_length:linear:phase_shift:cutoff"; trailing
     * fields are optional (sscanf leaves them untouched). */
    s->cutoff = 0.0;
    sscanf((char*)arg, "%d:%d:%d:%d:%lf",
           &af->data->rate, &s->filter_length, &s->linear,
           &s->phase_shift, &s->cutoff);
    /* Derive a sensible default cutoff from the filter length when the
     * user did not supply a positive one. */
    if(s->cutoff <= 0.0)
      s->cutoff = FFMAX(1.0 - 6.5/(s->filter_length+8), 0.80);
    return AF_OK;
  }
  case AF_CONTROL_RESAMPLE_RATE | AF_CONTROL_SET:
    af->data->rate = *(int*)arg;
    return AF_OK;
  }
  return AF_UNKNOWN;
}
/* Allocate the global option contexts used by the command-line option
 * parser. The swscale context is only created when swscale support is
 * compiled in.
 * NOTE(review): neither allocation result is checked here — callers
 * presumably tolerate NULL; confirm. */
void init_opts(void)
{
#if CONFIG_SWSCALE
    sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC,
                              NULL, NULL, NULL);
#endif
    swr_opts = swr_alloc();
}
/* Allocate the global option contexts used by the command-line helpers.
 * This variant gates on the CONFIG_* macros with runtime `if` rather than
 * the preprocessor — presumably the macros are always defined to 0/1 in
 * this build system so the dead branch is compiled out; confirm.
 * NOTE(review): allocation results are not checked. */
void init_opts(void)
{
    if(CONFIG_SWSCALE)
        sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC,
                                  NULL, NULL, NULL);
    if(CONFIG_SWRESAMPLE)
        swr_opts = swr_alloc();
}
/*
 * Open the audio encoder for this output stream, seed the sine-sweep
 * signal-generator state, allocate the encode and S16 scratch frames,
 * and set up the S16 -> encoder-format resampler.
 * Aborts the process on any failure (example-code convention).
 */
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost,
                       AVDictionary *opt_arg)
{
    AVCodecContext *enc = ost->st->codec;
    AVDictionary *opts = NULL;
    int nb_samples;
    int ret;

    /* Open the codec with a private copy of the caller's options. */
    av_dict_copy(&opts, opt_arg, 0);
    ret = avcodec_open2(enc, codec, &opts);
    av_dict_free(&opts);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* Signal generator: 110 Hz tone, sweeping up 110 Hz per second. */
    ost->t = 0;
    ost->tincr = 2 * M_PI * 110.0 / enc->sample_rate;
    ost->tincr2 = 2 * M_PI * 110.0 / enc->sample_rate / enc->sample_rate;

    nb_samples = (enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
                     ? 10000
                     : enc->frame_size;

    ost->frame = alloc_audio_frame(enc->sample_fmt, enc->channel_layout,
                                   enc->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, enc->channel_layout,
                                       enc->sample_rate, nb_samples);

    /* Resampler: S16 input, encoder's native sample format out. */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", enc->sample_fmt, 0);
    av_opt_set_int(ost->swr_ctx, "in_channel_count", enc->channels, 0);
    av_opt_set_int(ost->swr_ctx, "out_channel_count", enc->channels, 0);
    av_opt_set_int(ost->swr_ctx, "in_sample_rate", enc->sample_rate, 0);
    av_opt_set_int(ost->swr_ctx, "out_sample_rate", enc->sample_rate, 0);

    ret = swr_init(ost->swr_ctx);
    if (ret < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}
bool FFMPEGer::open_audio(AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg){ AVCodecContext *c; int nb_samples; int ret; AVDictionary *opt = NULL; c = ost->st->codec; /* open it */ av_dict_copy(&opt, opt_arg, 0); ret = avcodec_open2(c, codec, &opt); av_dict_free(&opt); if (ret < 0) { ALOGE("Could not open audio codec: %s", av_err2str(ret)); return false; } if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) nb_samples = 10000; else nb_samples = c->frame_size; ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout, c->sample_rate, nb_samples); ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout, c->sample_rate, nb_samples); /* create resampler context */ ost->swr_ctx = swr_alloc(); if (!ost->swr_ctx) { ALOGE("Could not allocate resampler context"); return false; } /* set options */ av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0); av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0); av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0); av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0); av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0); /* initialize the resampling context */ ret = swr_init(ost->swr_ctx); if (ret < 0){ ALOGE("Failed to initialize the resampling context"); return false; } return true; }
/* OCaml stub: create an AAC audio stream on the given ffmpeg context.
 * audio_info_ is an OCaml tuple: field 0 = sample rate, field 1 = channels.
 * Returns the new stream tuple; raises ExnOpen (via
 * raise_and_leave_blocking_section_if_not) when avcodec_open2 fails.
 * NOTE(review): avformat_new_stream's result is used unchecked, and the
 * swr context below only has its options set — swr_init() is presumably
 * called elsewhere before use; confirm both. */
value ffmpeg_stream_new_audio(value ctx, value audio_info_)
{
  CAMLparam2(ctx, audio_info_);
  CAMLlocal1(stream);
  AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
  stream = caml_alloc_tuple(StreamSize);
  int ret;
  /* Custom block holding the C-side stream state. */
  Stream_aux_direct_val(stream) = caml_alloc_custom(&streamaux_ops, sizeof(struct StreamAux), 0, 1);
  Stream_aux_val(stream)->type = Val_int(STREAM_AUDIO);
  Stream_context_direct_val(stream) = ctx;
  Stream_aux_val(stream)->avstream = avformat_new_stream(Context_val(ctx)->fmtCtx, codec);
  Stream_aux_val(stream)->avstream->codec->codec_id = AV_CODEC_ID_AAC;
  Stream_aux_val(stream)->avstream->codec->sample_rate = Int_val(Field(audio_info_, 0));
  Stream_aux_val(stream)->avstream->codec->channels = Int_val(Field(audio_info_, 1));
  /* Prefer the encoder's first supported sample format. */
  Stream_aux_val(stream)->avstream->codec->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
  Stream_aux_val(stream)->avstream->codec->channel_layout = AV_CH_LAYOUT_STEREO;
  //Stream_aux_val(stream)->avstream->codec->channels = av_get_channel_layout_nb_channels(Stream_aux_val(stream)->avstream->codec->channel_layout);
  if (Context_val(ctx)->fmtCtx->oformat->flags & AVFMT_GLOBALHEADER) {
    Stream_aux_val(stream)->avstream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
  }
  Stream_aux_val(stream)->avstream->time_base = (AVRational) {1, 10000};
  AVDictionary* codecOpts = NULL;
  AVCodecContext* codecCtx = Stream_aux_val(stream)->avstream->codec;
  /* Release the OCaml runtime lock around the (possibly slow) codec open. */
  caml_enter_blocking_section();
  ret = avcodec_open2(codecCtx, codec, &codecOpts);
  raise_and_leave_blocking_section_if_not(ret >= 0, ExnOpen, ret);
  caml_leave_blocking_section();
  /* When the encoder does not accept S16 directly, prepare a converter
   * from S16 input to the encoder's format (same rate and channels). */
  if (Stream_aux_val(stream)->avstream->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
    Stream_aux_val(stream)->swrCtx = swr_alloc();
    assert(Stream_aux_val(stream)->swrCtx);
    av_opt_set_int       (Stream_aux_val(stream)->swrCtx, "in_channel_count",  Stream_aux_val(stream)->avstream->codec->channels, 0);
    av_opt_set_int       (Stream_aux_val(stream)->swrCtx, "in_sample_rate",    Stream_aux_val(stream)->avstream->codec->sample_rate, 0);
    av_opt_set_sample_fmt(Stream_aux_val(stream)->swrCtx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (Stream_aux_val(stream)->swrCtx, "out_channel_count", Stream_aux_val(stream)->avstream->codec->channels, 0);
    av_opt_set_int       (Stream_aux_val(stream)->swrCtx, "out_sample_rate",   Stream_aux_val(stream)->avstream->codec->sample_rate, 0);
    av_opt_set_sample_fmt(Stream_aux_val(stream)->swrCtx, "out_sample_fmt",    Stream_aux_val(stream)->avstream->codec->sample_fmt, 0);
  }
  CAMLreturn((value) stream);
}
/*
 * Open the audio encoder and prepare the encode frame, the S16 scratch
 * frame (at the caller-supplied input rate), and the resampler converting
 * S16 @ sample_rate to the encoder's format @ c->sample_rate.
 * Logs to stderr and returns early on failure, leaving ost partially set up
 * (matching the original behaviour).
 */
static void OpenAudio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg, int sample_rate)
{
    AVCodecContext *c = NULL;
    int nb_samples = 0;
    int ret = 0;
    int rate_ratio = 1;
    AVDictionary *opt = NULL;

    c = ost->st->codec;

    /* Open the codec with a private copy of the caller's options. */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", MakeErrorString(ret));
        return;
    }

    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    /* FIX: the original computed nb_samples/(c->sample_rate/sample_rate),
     * which divides by zero when sample_rate is 0 or larger than the
     * encoder rate (integer division truncates the ratio to 0). */
    if (sample_rate > 0)
        rate_ratio = c->sample_rate / sample_rate;
    if (rate_ratio <= 0)
        rate_ratio = 1;

    ost->frame = AllocAudioFrame(c->sample_fmt, c->channel_layout, c->sample_rate, nb_samples);
    ost->tmp_frame = AllocAudioFrame(AV_SAMPLE_FMT_S16, c->channel_layout, sample_rate, nb_samples / rate_ratio);

    /* Resampler: S16 @ sample_rate in, encoder format @ c->sample_rate out. */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        return;
    }
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",  c->channels, 0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",    sample_rate, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count", c->channels, 0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",   c->sample_rate, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",    c->sample_fmt, 0);

    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        return;
    }
}
/* Construct an audio source in its default state: no id, at position 0,
 * full volume, not looping, origin undetermined, playback not started. */
AudioSource::AudioSource(void)
{
    /* Identity and playback cursor. */
    id = 0;
    position = 0;
    samples_frac = 0;
    queue_index = 0;

    /* Playback parameters. */
    volume = 1.0f;
    looping = false;
    source = SOURCE_UNDETERMINED;
    state = SOURCE_INITIAL;

#if defined(LIBTAS_ENABLE_AVDUMPING) || defined(LIBTAS_ENABLE_SOUNDPLAYBACK)
    /* Resampler context, only needed when dumping or playing back audio. */
    swr = swr_alloc();
#endif
}
int ffmpeg_open_codec(struct ffmpeg_file *file) { AVStream *s = file->format->streams[file->stream]; AVCodec *decoder = avcodec_find_decoder(s->codec->codec_id); if (avcodec_open2(s->codec, decoder, NULL) < 0) { return -1; } file->codec = s->codec; file->pkt = av_malloc(sizeof(AVPacket)); av_init_packet(file->pkt); av_packet_unref(file->pkt); file->time = 0; file->frame = av_frame_alloc(); if (!file->frame) { return -1; } file->swr = swr_alloc(); if (!file->swr) { return -1; } av_opt_set_int(file->swr, "in_channel_count", file->codec->channels, 0); av_opt_set_int(file->swr, "out_channel_count", file->channels, 0); av_opt_set_int(file->swr, "in_channel_layout", file->codec->channel_layout, 0); av_opt_set_int(file->swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(file->swr, "in_sample_rate", file->codec->sample_rate, 0); av_opt_set_int(file->swr, "out_sample_rate", file->sample_rate, 0); av_opt_set_sample_fmt(file->swr, "in_sample_fmt", file->codec->sample_fmt, 0); av_opt_set_sample_fmt(file->swr, "out_sample_fmt", file->sample_fmt, 0); swr_init(file->swr); if (!swr_is_initialized(file->swr)) { return -1; } return 0; }
/* Open the audio encoder, seed the tone-generator state, allocate the
 * working frames and build the S16 -> encoder-format resampler.
 * Logs and returns FALSE on the first failure, TRUE on success. */
BOOL CVideoLivRecord::open_audio(AVStream *st, AVCodec* codec, AVDictionary* opt)
{
    AVCodecContext *enc = st->codec;
    AVDictionary *opts = NULL;   // must start out NULL for av_dict_copy
    int nb_samples;
    int err;

    av_dict_copy(&opts, opt, 0);
    err = avcodec_open2(enc, codec, &opts);
    av_dict_free(&opts);
    if (err < 0){
        log("[CVideoLivRecord::open_audio] -- avcodec_open2() error");
        return FALSE;
    }

    // Tone-generator state: 110 Hz, sweeping up 110 Hz per second.
    m_t = 0;
    m_tincr = 2 * M_PI * 110.0 / enc->sample_rate;
    m_tincr2 = 2 * M_PI * 110.0 / enc->sample_rate / enc->sample_rate;

    nb_samples = (enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
                     ? 10000
                     : enc->frame_size;

    m_pAudioFrame = alloc_audio_frame(enc->sample_fmt, enc->channel_layout,
                                      enc->sample_rate, nb_samples);
    m_pAudioBkFrame = alloc_audio_frame(AV_SAMPLE_FMT_S16, enc->channel_layout,
                                        enc->sample_rate, nb_samples);

    // Resampler: S16 in, encoder's native sample format out.
    m_pAudioSwrctx = swr_alloc();
    if (!m_pAudioSwrctx){
        log("[CVideoLivRecord::open_audio] -- swr_alloc() error");
        return FALSE;
    }
    av_opt_set_sample_fmt(m_pAudioSwrctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    av_opt_set_sample_fmt(m_pAudioSwrctx, "out_sample_fmt", enc->sample_fmt, 0);
    av_opt_set_int(m_pAudioSwrctx, "in_channel_count", enc->channels, 0);
    av_opt_set_int(m_pAudioSwrctx, "out_channel_count", enc->channels, 0);
    av_opt_set_int(m_pAudioSwrctx, "in_sample_rate", enc->sample_rate, 0);
    av_opt_set_int(m_pAudioSwrctx, "out_sample_rate", enc->sample_rate, 0);

    if (swr_init(m_pAudioSwrctx) < 0){
        log("[CVideoLivRecord::open_audio] -- swr_init() error");
        return FALSE;
    }
    return TRUE;
}
/* Open an audio frame puller for `path`.
 * output_sample_rate <= 0 means "keep the source sample rate".
 * On success *o_fp receives the new puller and 0 is returned; on failure
 * a negative AVERROR code is returned and *o_fp stays NULL.
 * NOTE(review): `fp` is not released on the error paths after
 * _frame_puller_new() succeeds — confirm who owns cleanup there. */
int frame_puller_open_audio(frame_puller **o_fp, const char *path, int output_sample_rate)
{
    *o_fp = NULL;
    int ret;
    frame_puller *fp;

    if ((ret = _frame_puller_new(&fp, path)) < 0) return ret;
    fp->type = FRAME_PULLER_AUDIO;
    if ((ret = _frame_puller_init(fp, AVMEDIA_TYPE_AUDIO)) < 0) return ret;
    fp->output_sample_rate = output_sample_rate > 0 ? output_sample_rate : fp->codec_ctx->sample_rate;
    /* Ratio used to scale per-frame sample counts to the output rate. */
    fp->sample_scale_rate = (double)fp->output_sample_rate / (double)fp->codec_ctx->sample_rate;
    // Initialize the libswresample context for audio resampling.
    // > Create the buffer for the converted frame to store data
    fp->frame = av_frame_alloc();
    fp->frame->format = AV_SAMPLE_FMT_S16P;
    fp->frame->channel_layout = fp->codec_ctx->channel_layout;
    fp->frame->sample_rate = fp->output_sample_rate;
    /* Variable-frame-size codecs (and pcm_mulaw) give no fixed frame size,
     * so fall back to a 4096-sample buffer. */
    if ((fp->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) || !strcmp(fp->codec->name, "pcm_mulaw"))
        fp->frame->nb_samples = 4096;
    else fp->frame->nb_samples = fp->sample_scale_rate * fp->codec_ctx->frame_size;
    av_log(NULL, AV_LOG_INFO, "frame_puller: number of samples per frame = %d\n", fp->frame->nb_samples);
    if ((ret = av_frame_get_buffer(fp->frame, 0)) < 0) return ret;
    // > Create the SwrContext
    fp->libsw.swr_ctx = swr_alloc();
    if (!fp->libsw.swr_ctx) {
        av_log(NULL, AV_LOG_ERROR, "frame_puller: Cannot initialize audio resampling library"
            "(possibly caused by insufficient memory)\n");
        return AVERROR_UNKNOWN;
    }
    // > Provide options for the SwrContext: keep the layout, convert the
    //   rate and force packed->planar S16 output.
    av_opt_set_channel_layout(fp->libsw.swr_ctx, "in_channel_layout", fp->codec_ctx->channel_layout, 0);
    av_opt_set_channel_layout(fp->libsw.swr_ctx, "out_channel_layout", fp->codec_ctx->channel_layout, 0);
    av_opt_set_int(fp->libsw.swr_ctx, "in_sample_rate", fp->codec_ctx->sample_rate, 0);
    av_opt_set_int(fp->libsw.swr_ctx, "out_sample_rate", fp->output_sample_rate, 0);
    av_opt_set_sample_fmt(fp->libsw.swr_ctx, "in_sample_fmt", fp->codec_ctx->sample_fmt, 0);
    av_opt_set_sample_fmt(fp->libsw.swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16P, 0);
    // > Fully initialize the SwrContext
    if ((ret = swr_init(fp->libsw.swr_ctx)) < 0) return ret;

    // For use in @ref frame_puller_last_time.
    fp->frame->pts = -233333;

    *o_fp = fp;
    return 0;
}
// Lazily create and initialise the audio resampler converting decoder
// output to interleaved stereo float at host.sampleRate.
// No-op until stream metadata is available, and once the resampler exists.
// On failure (JIF jumps to err when its first argument is truthy) the
// decoder is marked Failed and closed.
void ensureAudioPostProcess()
{
    if (host.state < CodecBoxDecoderState::Metadata) return;
    if (swr) return;  // already set up
    swr = swr_alloc();
    JIF(!swr, "failed to alloc audio resampler.");
    // Input side mirrors the codec context; output is fixed stereo FLT.
    av_opt_set_channel_layout(swr, "in_channel_layout", audioCodecContext->channel_layout, 0);
    av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(swr, "in_sample_rate", audioCodecContext->sample_rate, 0);
    av_opt_set_int(swr, "out_sample_rate", host.sampleRate, 0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt", audioCodecContext->sample_fmt, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
    JIF(swr_init(swr), "failed to init audio resampler.");
    return;
err:
    host.state = CodecBoxDecoderState::Failed;
    close();
}
void init_swr(){ uint64_t out_channel_layout=AV_CH_LAYOUT_STEREO; //nb_samples: AAC-1024 MP3-1152 out_sample_rate=pCodecCtx->sample_rate; out_channels=av_get_channel_layout_nb_channels(out_channel_layout); out_buffer=(uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE*out_channels); //FIX:Some Codec's Context Information is missing int in_channel_layout=av_get_default_channel_layout(pCodecCtx->channels); //Swr au_convert_ctx = swr_alloc(); swr_alloc_set_opts(au_convert_ctx,out_channel_layout, out_sample_fmt, out_sample_rate, in_channel_layout, pCodecCtx->sample_fmt , pCodecCtx->sample_rate,0, NULL); if(swr_init(au_convert_ctx)<0){ au_convert_ctx=NULL; } createBufferQueueAudioPlayer(2,out_sample_rate); }
void AudioResampleImpl::initResampler() { m_resampler.reset(swr_alloc(), [](SwrContext * context){ swr_free(&context); }); assert(m_from.isValid()); assert(m_to.isValid()); av_opt_set_int(m_resampler.get(), "in_channel_layout", av_get_default_channel_layout(m_from.channelCount()), 0); /// @todo Дополнить раскладкой AudioFormat av_opt_set_int(m_resampler.get(), "out_channel_layout", av_get_default_channel_layout(m_to.channelCount()), 0); av_opt_set_int(m_resampler.get(), "in_sample_rate", m_from.sampleRate(), 0); av_opt_set_int(m_resampler.get(), "out_sample_rate", m_to.sampleRate(), 0); av_opt_set_sample_fmt(m_resampler.get(), "in_sample_fmt", m_from.sampleFormat(), 0); av_opt_set_sample_fmt(m_resampler.get(), "out_sample_fmt", m_to.sampleFormat(), 0); /// Non planar int res = swr_init(m_resampler.get()); if (res != 0) throw FFException(res); }
/* Allocate (when s is NULL) and pre-configure a SwrContext with the given
 * conversion parameters. Returns the context, or NULL when any option
 * cannot be stored — in that case an error is logged and the context is
 * freed. The internal sample format ("tsf") is left on auto-select
 * (AV_SAMPLE_FMT_NONE); the channel counts ("ich"/"och") are derived from
 * the user layouts that the preceding option calls just stored on the
 * context (so the option order matters), and the user channel-count
 * override ("uch") is cleared. */
struct SwrContext *swr_alloc_set_opts(struct SwrContext *s,
                                      int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,
                                      int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate,
                                      int log_offset, void *log_ctx){
    if(!s) s= swr_alloc();
    if(!s) return NULL;

    s->log_level_offset= log_offset;
    s->log_ctx= log_ctx;

    if (av_opt_set_int(s, "ocl", out_ch_layout, 0) < 0)
        goto fail;
    if (av_opt_set_int(s, "osf", out_sample_fmt, 0) < 0)
        goto fail;
    if (av_opt_set_int(s, "osr", out_sample_rate, 0) < 0)
        goto fail;
    if (av_opt_set_int(s, "icl", in_ch_layout, 0) < 0)
        goto fail;
    if (av_opt_set_int(s, "isf", in_sample_fmt, 0) < 0)
        goto fail;
    if (av_opt_set_int(s, "isr", in_sample_rate, 0) < 0)
        goto fail;
    if (av_opt_set_int(s, "tsf", AV_SAMPLE_FMT_NONE, 0) < 0)
        goto fail;
    if (av_opt_set_int(s, "ich", av_get_channel_layout_nb_channels(s->user_in_ch_layout), 0) < 0)
        goto fail;
    if (av_opt_set_int(s, "och", av_get_channel_layout_nb_channels(s->user_out_ch_layout), 0) < 0)
        goto fail;
    av_opt_set_int(s, "uch", 0, 0);
    return s;
fail:
    av_log(s, AV_LOG_ERROR, "Failed to set option\n");
    swr_free(&s);
    return NULL;
}
/* Older, unchecked variant: allocate (when s is NULL) and configure a
 * SwrContext with the given conversion parameters.
 * NOTE(review): unlike the checked version of this function, failures of
 * the individual av_opt_set_int() calls are silently ignored here.
 * The internal sample format ("tsf") is forced to S16, and the channel
 * counts ("ich"/"och") are derived from the layouts that the preceding
 * option calls just stored on the context (so the option order matters). */
struct SwrContext *swr_alloc_set_opts(struct SwrContext *s,
                                      int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,
                                      int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate,
                                      int log_offset, void *log_ctx){
    if(!s) s= swr_alloc();
    if(!s) return NULL;

    s->log_level_offset= log_offset;
    s->log_ctx= log_ctx;

    av_opt_set_int(s, "ocl", out_ch_layout, 0);
    av_opt_set_int(s, "osf", out_sample_fmt, 0);
    av_opt_set_int(s, "osr", out_sample_rate, 0);
    av_opt_set_int(s, "icl", in_ch_layout, 0);
    av_opt_set_int(s, "isf", in_sample_fmt, 0);
    av_opt_set_int(s, "isr", in_sample_rate, 0);
    av_opt_set_int(s, "tsf", AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int(s, "ich", av_get_channel_layout_nb_channels(s->in_ch_layout), 0);
    av_opt_set_int(s, "och", av_get_channel_layout_nb_channels(s->out_ch_layout), 0);

    return s;
}
// Prepare resampling for the fdkaac encoder: when the incoming frame's
// sample format differs from the encoder's expected format (signed 16-bit
// PCM per the inline comment), build and initialise a swr context that
// converts layout/rate/format to the configured nChannel/nSampleRate/
// sampleFormat. Returns -1 when the resampler cannot be initialised.
// NOTE(review): this block appears truncated in this chunk — no success
// return or function-closing brace is visible here; confirm against the
// full file. swr_alloc()'s result is also used unchecked.
int AudioResampler::InitAudioResampling(IN const std::shared_ptr<MediaFrame>& _pFrame)
{
    // for fdkaac encoder, input samples should be PCM signed16le, otherwise do resampling
    if (_pFrame->AvFrame()->format != sampleFormat) {
#ifdef LOG_TRADITIONAL
        loginfo("input sample_format=%d, need sample_format=%d, initiate resampling",
#else
        loginfo("input sample_format={} need sample_format={}, initiate resampling",
#endif
                _pFrame->AvFrame()->format, sampleFormat);
        pSwr_ = swr_alloc();
        av_opt_set_int(pSwr_, "in_channel_layout", av_get_default_channel_layout(_pFrame->AvFrame()->channels), 0);
        av_opt_set_int(pSwr_, "out_channel_layout", av_get_default_channel_layout(nChannel), 0);
        av_opt_set_int(pSwr_, "in_sample_rate", _pFrame->AvFrame()->sample_rate, 0);
        av_opt_set_int(pSwr_, "out_sample_rate", nSampleRate, 0);
        av_opt_set_sample_fmt(pSwr_, "in_sample_fmt", static_cast<AVSampleFormat>(_pFrame->AvFrame()->format), 0);
        av_opt_set_sample_fmt(pSwr_, "out_sample_fmt", sampleFormat, 0);
        if (swr_init(pSwr_) != 0) {
            logerror("could not initiate resampling");
            return -1;
        }
    }
void XAudioStream::setSpeed(float speed) { if(m_speed == speed || speed <= 0.0f) return; m_speed = speed; swr_free(&m_pSwrContext); m_pSwrContext = swr_alloc(); if(m_pSwrContext == NULL) return; if(m_pAudioCodecCtx->channel_layout == 0) { swr_alloc_set_opts(m_pSwrContext,av_get_default_channel_layout(XEG.getAudioChannelSum()),getSampleFormat(),XEG.getAudioSampleRate() * m_speed, av_get_default_channel_layout(m_pAudioCodecCtx->channels),m_pAudioCodecCtx->sample_fmt,m_pAudioCodecCtx->sample_rate,0,NULL); }else { swr_alloc_set_opts(m_pSwrContext,av_get_default_channel_layout(XEG.getAudioChannelSum()),getSampleFormat(),XEG.getAudioSampleRate() * m_speed, m_pAudioCodecCtx->channel_layout,m_pAudioCodecCtx->sample_fmt,m_pAudioCodecCtx->sample_rate,0,NULL); } if(swr_init(m_pSwrContext) < 0) { LogStr("swr_init() fail"); return; } }
// Configure the transcoder's audio resampler: decoder output -> packed
// stereo S16 at 44.1 kHz, and allocate the shared conversion buffer.
// On failure m_au_convert_ctx is left NULL.
void CTransCoder::SwrInit(AVCodecContext* ctx)
{
    // Output audio parameters.
    m_out_channel_layout = AV_CH_LAYOUT_STEREO;
    m_out_nb_samples = 1024;               // nb_samples: AAC frame size
    m_out_sample_fmt = AV_SAMPLE_FMT_S16;
    m_out_sample_rate = 44100;
    m_out_channels = av_get_channel_layout_nb_channels(m_out_channel_layout);

    // NOTE: the buffer is sized MAX_AUDIO_FRAME_SIZE, deliberately larger
    // than one converted frame. (The original also computed the exact
    // per-frame size with av_samples_get_buffer_size() but never used it;
    // that dead computation has been removed.)
    m_audioBuffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE);

    int64_t in_channel_layout = av_get_default_channel_layout(ctx->channels);

    // swr_alloc_set_opts() allocates when given NULL, so the separate
    // swr_alloc() call in the original was redundant.
    m_au_convert_ctx = swr_alloc_set_opts(NULL,
                                          m_out_channel_layout, m_out_sample_fmt, m_out_sample_rate,
                                          in_channel_layout, ctx->sample_fmt, ctx->sample_rate,
                                          0, NULL);
    if (!m_au_convert_ctx)
        return;
    if (swr_init(m_au_convert_ctx) < 0) {  // FIX: result was ignored
        swr_free(&m_au_convert_ctx);       // don't keep an unusable context
    }
}
// Decide whether decoder output needs resampling to 44.1 kHz stereo (and,
// with swresample/avresample available, S16), and lazily create/configure
// the resampler backend selected at configure time.
// Returns true when resampling is required, false when the stream already
// matches the target format.
// NOTE(review): the sample-format options are set with av_opt_set_int
// rather than av_opt_set_sample_fmt — presumably fine since the option is
// an int-backed enum; confirm. Also `#elif HAVE_AVRESAMPLE_H` evaluates
// the macro (vs. the `#ifdef` used elsewhere) — confirm it is always
// defined with a value.
bool AudioResamplerFfmpeg::init(AVCodecContext* ctx) {
    if ((ctx->sample_rate != 44100) ||
#if defined(HAVE_SWRESAMPLE_H) || defined(HAVE_AVRESAMPLE_H)
        (ctx->sample_fmt != AV_SAMPLE_FMT_S16) ||
#endif
        (ctx->channels != 2)) {
        if (! _context) {
            // Pick the backend that was detected at configure time.
#ifdef HAVE_SWRESAMPLE_H
            _context = swr_alloc();
#elif HAVE_AVRESAMPLE_H
            _context = avresample_alloc_context();
#else
            // Legacy API takes the whole conversion spec up front.
            _context = av_audio_resample_init(2, ctx->channels,
                44100, ctx->sample_rate,
                AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16,
                16, 10, 0, 0.8);
#endif
#if defined(HAVE_SWRESAMPLE_H) || defined(HAVE_AVRESAMPLE_H)
            av_opt_set_int(_context, "in_channel_layout", av_get_default_channel_layout(ctx->channels), 0);
            av_opt_set_int(_context, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
            av_opt_set_int(_context, "in_sample_rate", ctx->sample_rate, 0);
            av_opt_set_int(_context, "out_sample_rate", 44100, 0);
            av_opt_set_int(_context, "in_sample_fmt", ctx->sample_fmt, 0);
            av_opt_set_int(_context, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
#endif
#ifdef HAVE_SWRESAMPLE_H
            swr_init(_context);
#elif HAVE_AVRESAMPLE_H
            avresample_open(_context);
#endif
        }
        return true;
    }
    return false;
}
/**
 * Allocates and initializes the SwrContext so that we don't have to deal
 * with planar sample formats: the input is converted to its packed
 * equivalent, with channel count, layout and sample rate unchanged.
 *
 * @param env JNIEnv
 * @param aio FFAudioIO
 * @return a negative value should an error occur
 */
static int init_swr(JNIEnv *env, FFAudioIO *aio) {
    int res = 0;

    aio->swr_context = swr_alloc();
    if (!aio->swr_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate swr context.");
        goto bail;
    }

    /* codecpar->format is declared int; cast it for the sample-format setters. */
    av_opt_set_sample_fmt(aio->swr_context, "in_sample_fmt",
                          (enum AVSampleFormat)aio->stream->codecpar->format, 0);
    /* make sure we get interleaved/packed output */
    av_opt_set_sample_fmt(aio->swr_context, "out_sample_fmt",
                          av_get_packed_sample_fmt((enum AVSampleFormat)aio->stream->codecpar->format), 0);

    /* keep everything else the way it was... */
    av_opt_set_int(aio->swr_context, "in_channel_count", aio->stream->codecpar->channels, 0);
    av_opt_set_int(aio->swr_context, "out_channel_count", aio->stream->codecpar->channels, 0);
    av_opt_set_int(aio->swr_context, "in_channel_layout", aio->stream->codecpar->channel_layout, 0);
    av_opt_set_int(aio->swr_context, "out_channel_layout", aio->stream->codecpar->channel_layout, 0);
    av_opt_set_int(aio->swr_context, "in_sample_rate", aio->stream->codecpar->sample_rate, 0);
    av_opt_set_int(aio->swr_context, "out_sample_rate", aio->stream->codecpar->sample_rate, 0);

    res = swr_init(aio->swr_context);
    if (res < 0) {
        /* FIX: the original overwrote res with AVERROR(ENOMEM) here,
         * masking the actual failure reason reported by swr_init(). */
        throwIOExceptionIfError(env, res, "Could not initialize swr context");
        goto bail;
    }

bail:
    return res;
}
int main (int argc, char **argv){ int ret = 0, got_frame; AVFormatContext *ofmt_ctx = NULL; AVOutputFormat *ofmt = NULL; uint8_t *sample_buf; if (argc != 4 && argc != 5) { fprintf(stderr, "input 1.source file:%s\n" "2.output_video\n" "3.output_audio\n" "4.mux video file(Optional)\n" "\n", argv[0]); exit(1); } src_filename = argv[1]; video_dst_filename = argv[2]; audio_dst_filename = argv[3]; //optional mux to any type video if(argc == 5){ out_filename = argv[4]; } /* register all formats and codecs */ av_register_all(); //for network stream avformat_network_init(); ret = init_input(); if(ret){ goto end; } ret = init_video_out_context(); if(ret){ goto end; } ret = init_audio_out_context(sample_buf); if(ret){ goto end; }else{ int aud_buffer_size; //alloc frame and packet AudFrame = av_frame_alloc(); AudFrame->nb_samples = AudCodecCtx->frame_size; AudFrame->format = AudCodecCtx->sample_fmt; AudFrame->channel_layout = AudCodecCtx->channel_layout; aud_buffer_size = av_samples_get_buffer_size(NULL, AudCodecCtx->channels,AudCodecCtx->frame_size,AudCodecCtx->sample_fmt, 1); sample_buf = (uint8_t *)av_malloc(aud_buffer_size); avcodec_fill_audio_frame(AudFrame, AudCodecCtx->channels, AudCodecCtx->sample_fmt,(const uint8_t*)sample_buf, aud_buffer_size, 1); av_new_packet(&AudPkt,aud_buffer_size); } if(argc == 5){ //alloc memory avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename); if (!ofmt_ctx) { printf( "Could not create output context\n"); ret = AVERROR_UNKNOWN; return 1; } ofmt = ofmt_ctx->oformat; ret = init_output(ofmt_ctx); if(ret){ printf("Init output ERROR\n"); goto end; } } if (!(ofmt->flags & AVFMT_NOFILE)) { ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE); if (ret < 0) { printf( "Could not open output file '%s'", out_filename); goto end; } } ret = avformat_write_header(ofmt_ctx, NULL); if (ret < 0) { printf( "Error occurred when opening output file\n"); goto end; } //this will fill up by decoder(|read frame|->packet->|decoder|->frame) 
frame = av_frame_alloc(); if (!frame) { fprintf(stderr, "Could not allocate frame\n"); ret = AVERROR(ENOMEM); goto end; } if (video_stream) printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename); if (audio_stream) printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename); //Write video Header avformat_write_header(pFormatCtx,NULL); //Write audio Header avformat_write_header(AudFormatCtx,NULL); //alloc packet to get copy from pkt av_new_packet(&epkt,picture_size); /*setup the convert parameter *due to input sample format AV_SAMPLE_FMT_FLTP *can't be converted to AV_SAMPLE_FMT_S16 *which only accepted by the aac encoder */ swr = swr_alloc(); av_opt_set_int(swr, "in_channel_layout", audio_dec_ctx->channel_layout, 0); av_opt_set_int(swr, "out_channel_layout", AudCodecCtx->channel_layout, 0); av_opt_set_int(swr, "in_sample_rate", audio_dec_ctx->sample_rate, 0); av_opt_set_int(swr, "out_sample_rate", AudCodecCtx->sample_rate, 0); av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); swr_init(swr); /*start read frames from the file */ while (av_read_frame(fmt_ctx, &pkt) >= 0) { //do demux & decode -> encode -> output h264 & aac file ret = decode_packet(); if (ret < 0) break; if(argc == 5){ remux_packet(ofmt_ctx,&pkt); } av_free_packet(&pkt); } /* flush cached frames */ pkt.data = NULL; pkt.size = 0; //Flush Encoder int retfe = flush_encoder(pFormatCtx,0); if (retfe < 0) { printf("Flushing encoder failed\n"); return -1; } //Flush Encoder ret = flush_encoder(pFormatCtx,0); if (ret < 0) { printf("Flushing encoder failed\n"); return -1; } //Write video trailer av_write_trailer(pFormatCtx); //Write audio Trailer av_write_trailer(AudFormatCtx); //Write remux Trailer if(argc == 5){ av_write_trailer(ofmt_ctx); } printf("Output succeeded!!!!\n"); end: //free remux if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) avio_close(ofmt_ctx->pb); 
avformat_free_context(ofmt_ctx); //free audio if (audio_st){ avcodec_close(audio_st->codec); av_free(AudFrame); av_free(sample_buf); } avio_close(AudFormatCtx->pb); avformat_free_context(AudFormatCtx); //free video if (video_st){ avcodec_close(video_st->codec); av_free(pFrame); av_free(picture_buf); } avio_close(pFormatCtx->pb); avformat_free_context(pFormatCtx); //free decode avcodec_close(video_dec_ctx); avcodec_close(audio_dec_ctx); avformat_close_input(&fmt_ctx); if (video_dst_file) fclose(video_dst_file); if (audio_dst_file) fclose(audio_dst_file); av_frame_free(&frame); return ret < 0; }
/*
 * Demux thread: opens the input, wires up the optional resampler, then
 * loops reading packets and routing them to the audio/video packet queues.
 * Runs on its own SDL thread (started from main via SDL_CreateThread).
 * Returns 0; on fatal setup errors it returns -1 or falls through to
 * "fail", which posts FF_QUIT_EVENT so the main loop can shut down.
 */
int decode_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx = NULL;
    AVPacket pkt1, *packet = &pkt1;
    AVDictionary *io_dict = NULL;
    AVIOInterruptCB callback;
    int video_index = -1;
    int audio_index = -1;
    int i;

    is->videoStream = -1;
    is->audioStream = -1;
    is->audio_need_resample = 0;
    /* Published so the event loop / seek handler can reach this state. */
    global_video_state = is;

    /* will interrupt blocking functions if we quit! */
    callback.callback = decode_interrupt_cb;
    callback.opaque = is;

    if(avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) {
        fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
        return -1;
    }

    /* Open video file */
    if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) {
        return -1; /* Couldn't open file */
    }

    is->pFormatCtx = pFormatCtx;

    /* Retrieve stream information */
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1; /* Couldn't find stream information */
    }

    /* Dump information about file onto standard error */
    av_dump_format(pFormatCtx, 0, is->filename, 0);

    /* Find the first video and first audio stream. */
    for(i = 0; i < pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0) {
            video_index = i;
        }

        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0) {
            audio_index = i;
        }
    }

    if(audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if(video_index >= 0) {
        stream_component_open(is, video_index);
    }

    /* stream_component_open() sets is->videoStream / is->audioStream on
     * success; if neither opened there is nothing to play. */
    if(is->videoStream < 0 && is->audioStream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        goto fail;
    }

#ifdef __RESAMPLER__
    /* Output path wants S16; set up a resampler for anything else. */
    if( audio_index >= 0 && pFormatCtx->streams[audio_index]->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
        is->audio_need_resample = 1;
        is->pResampledOut = NULL;
        is->pSwrCtx = NULL;
        printf("Configure resampler: ");

#ifdef __LIBAVRESAMPLE__
        printf("libAvResample\n");
        is->pSwrCtx = avresample_alloc_context();
#endif

#ifdef __LIBSWRESAMPLE__
        printf("libSwResample\n");
        is->pSwrCtx = swr_alloc();
#endif

        /* Some MP3/WAV files don't report a channel layout, so derive one
         * from the channel count (assume stereo rather than 5.1). */
        if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0 && pFormatCtx->streams[audio_index]->codec->channels == 2) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0 && pFormatCtx->streams[audio_index]->codec->channels == 1) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_MONO;
        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0 && pFormatCtx->streams[audio_index]->codec->channels == 0) {
            /* Neither layout nor count known: fall back to stereo. */
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            pFormatCtx->streams[audio_index]->codec->channels = 2;
        }

        av_opt_set_int(is->pSwrCtx, "in_channel_layout", pFormatCtx->streams[audio_index]->codec->channel_layout, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_fmt", pFormatCtx->streams[audio_index]->codec->sample_fmt, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_rate", pFormatCtx->streams[audio_index]->codec->sample_rate, 0);
        /* Fixed output: stereo, S16, 44.1 kHz. */
        av_opt_set_int(is->pSwrCtx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_rate", 44100, 0);

#ifdef __LIBAVRESAMPLE__
        if (avresample_open(is->pSwrCtx) < 0) {
#else
        if (swr_init(is->pSwrCtx) < 0) {
#endif
            fprintf(stderr, " ERROR!! From Samplert: %d Hz Sample format: %s\n", pFormatCtx->streams[audio_index]->codec->sample_rate, av_get_sample_fmt_name(pFormatCtx->streams[audio_index]->codec->sample_fmt));
            fprintf(stderr, " To 44100 Sample format: s16\n");
            /* Resampler init failed: play unresampled rather than abort.
             * NOTE(review): the context allocated above is not freed here —
             * confirm whether this leak is acceptable. */
            is->audio_need_resample = 0;
            is->pSwrCtx = NULL;;
        }

    }
#endif

    /* main decode loop */
    for(;;) {
        if(is->quit) {
            break;
        }

        /* seek stuff goes here */
        if(is->seek_req) {
            int stream_index = -1;
            int64_t seek_target = is->seek_pos;

            /* Prefer seeking on the video stream; fall back to audio. */
            if(is->videoStream >= 0) {
                stream_index = is->videoStream;
            } else if(is->audioStream >= 0) {
                stream_index = is->audioStream;
            }

            if(stream_index >= 0) {
                /* Convert the AV_TIME_BASE position to the stream timebase. */
                seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q, pFormatCtx->streams[stream_index]->time_base);
            }

            if(av_seek_frame(is->pFormatCtx, stream_index, seek_target, is->seek_flags) < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);
            } else {
                /* Drop queued packets and inject the flush marker so the
                 * decoder threads reset their codec state. */
                if(is->audioStream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }

                if(is->videoStream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }

            is->seek_req = 0;
        }

        /* Back-pressure: pause reading while the queues are full. */
        if(is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE) {
            SDL_Delay(10);
            continue;
        }

        if(av_read_frame(is->pFormatCtx, packet) < 0) {
            if(is->pFormatCtx->pb->error == 0) {
                SDL_Delay(100); /* no error; wait for user input */
                continue;
            } else {
                break;
            }
        }

        /* Is this a packet from the video stream? */
        if(packet->stream_index == is->videoStream) {
            packet_queue_put(&is->videoq, packet);
        } else if(packet->stream_index == is->audioStream) {
            packet_queue_put(&is->audioq, packet);
        } else {
            /* Packet from a stream we don't play: discard it. */
            av_free_packet(packet);
        }
    }

    /* all done - wait for it */
    while(!is->quit) {
        SDL_Delay(100);
    }

fail: {
        /* Tell the main event loop to quit; it owns final cleanup. */
        SDL_Event event;
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

/*
 * Request an asynchronous seek to `pos` (in AV_TIME_BASE units).
 * `rel` is the direction of the seek: negative selects
 * AVSEEK_FLAG_BACKWARD. The request is ignored while one is pending;
 * decode_thread performs the actual av_seek_frame().
 */
void stream_seek(VideoState *is, int64_t pos, int rel) {
    if(!is->seek_req) {
        is->seek_pos = pos;
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
        is->seek_req = 1;
    }
}

/*
 * Entry point: initializes FFmpeg and SDL, spawns decode_thread for the
 * file named in argv[1], then runs the SDL event loop handling keyboard
 * seeks (arrow keys), refresh/alloc events, and quit.
 */
int main(int argc, char *argv[]) {
    SDL_Event event;
    //double pts;
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }

    /* Register all formats and codecs */
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    /* Make a screen to put our video */
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
    screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif

    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    av_strlcpy(is->filename, argv[1], 1024);

    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    /* Kick off the first video refresh in 40 ms. */
    schedule_refresh(is, 40);

    is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
    is->parse_tid = SDL_CreateThread(decode_thread, is);

    if(!is->parse_tid) {
        av_free(is);
        return -1;
    }

    /* Sentinel packet used to signal a codec flush after seeking. */
    av_init_packet(&flush_pkt);
    flush_pkt.data = (unsigned char *)"FLUSH";

    for(;;) {
        double incr, pos;
        SDL_WaitEvent(&event);

        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;

            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;

            case SDLK_UP:
                incr = 60.0;
                goto do_seek;

            case SDLK_DOWN:
                incr = -60.0;
                goto do_seek;

do_seek:
                /* Seek relative to the current master clock. */
                if(global_video_state) {
                    pos = get_master_clock(global_video_state);
                    pos += incr;
                    stream_seek(global_video_state, (int64_t)(pos * AV_TIME_BASE), incr);
                }

                break;

            default:
                break;
            }

            break;

        case FF_QUIT_EVENT:
        case SDL_QUIT:
            is->quit = 1;
            /*
             * If the video has finished playing, then both the picture and
             * audio queues are waiting for more data. Make them stop
             * waiting and terminate normally.
             */
            SDL_CondSignal(is->audioq.cond);
            SDL_CondSignal(is->videoq.cond);
            SDL_Quit();
            exit(0);
            break;

        case FF_ALLOC_EVENT:
            alloc_picture(event.user.data1);
            break;

        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;

        default:
            break;
        }
    }

    return 0;
}
/*
 * Create and configure the output muxer (file-level globals: formatContext,
 * videoStream/videoCodecCtx, audioStream/audioCodecCtx, swrContext,
 * src/dstSamplesData, streamLock, ...).
 *
 * formatName - container short name passed to av_guess_format() (may be NULL)
 * fileName   - output file name / URL
 * config     - mux flags, video geometry/framerate, audio bitrate/channels
 *
 * Returns 0 on success; a distinct negative value identifies each failure
 * point (or the FFmpeg error code from avcodec_open2/swr_init/alloc calls).
 * Side effects: registers FFmpeg components, initializes streamLock, and
 * opens the AAC encoder plus an S16 -> FLTP resampler when audio is enabled.
 */
int initializeMuxer(const char *formatName, const char *fileName, PCAST_CONFIGURATION config) {
    AVOutputFormat *format;
    AVCodec *audCodec;
    int ret;

    av_register_all();
    avformat_network_init();

    /* Reset per-session mux state. */
    hasWrittenStreamHeader = 0;
    spsLength = 0;
    ppsLength = 0;
    pthread_mutex_init(&streamLock, NULL);

    format = av_guess_format(formatName, fileName, NULL);
    if (format == NULL) {
        fprintf(stderr, "av_guess_format() failed\n");
        return -1;
    }

    formatContext = avformat_alloc_context();
    if (formatContext == NULL) {
        fprintf(stderr, "avformat_alloc_context() failed\n");
        return -2;
    }

    /* Initialize the AVFormatContext.
     * FIX: filename is a fixed-size buffer inside AVFormatContext; the
     * original strcpy() could overflow it for long names, so use a bounded,
     * always-NUL-terminated copy instead. */
    formatContext->oformat = format;
    snprintf(formatContext->filename, sizeof(formatContext->filename), "%s", fileName);

    if (config->muxEnableFlags & ENABLE_VIDEO) {
        /* Add the video stream (raw H.264 is muxed, not re-encoded, so no
         * encoder is attached here). */
        videoStream = avformat_new_stream(formatContext, NULL);
        if (videoStream == NULL) {
            fprintf(stderr, "avformat_new_stream() #1 failed\n");
            return -3;
        }

        /* Configure the video codec parameters. */
        videoCodecCtx = videoStream->codec;
        videoCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
        videoCodecCtx->codec_id = AV_CODEC_ID_H264;
        videoCodecCtx->bit_rate = 0;
        videoCodecCtx->width = config->width;
        videoCodecCtx->height = config->height;
        videoCodecCtx->time_base.den = config->frameRate;
        videoCodecCtx->time_base.num = 1;
        videoCodecCtx->gop_size = config->frameRate * config->iFrameInterval;
        videoCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    }

    if (config->muxEnableFlags & ENABLE_AUDIO) {
        audCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
        if (audCodec == NULL) {
            fprintf(stderr, "avcodec_find_encoder failed\n");
            return -4;
        }

        /* Add the audio stream. */
        audioStream = avformat_new_stream(formatContext, audCodec);
        if (audioStream == NULL) {
            fprintf(stderr, "avformat_new_stream() #2 failed\n");
            return -5;
        }

        /* Configure the AAC encoder: 44.1 kHz, planar float, LC profile.
         * The native AAC encoder requires the experimental compliance flag. */
        audioCodecCtx = audioStream->codec;
        audioCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
        audioCodecCtx->codec_id = AV_CODEC_ID_AAC;
        audioCodecCtx->bit_rate = config->audioBitrate;
        audioCodecCtx->sample_rate = 44100;
        audioCodecCtx->channels = config->audioChannels;
        audioCodecCtx->sample_fmt = AV_SAMPLE_FMT_FLTP;
        audioCodecCtx->profile = FF_PROFILE_AAC_LOW;
        audioCodecCtx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

        ret = avcodec_open2(audioCodecCtx, audCodec, NULL);
        if (ret < 0) {
            fprintf(stderr, "avcodec_open2() failed: %d\n", ret);
            return ret;
        }

        /* Size the source buffer from the encoder's frame size (or an
         * arbitrary 10000 samples for variable-frame-size codecs). */
        srcSamplesCount = audioCodecCtx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
            10000 : audioCodecCtx->frame_size;
        ret = av_samples_alloc_array_and_samples(&srcSamplesData, &srcSamplesLinesize,
            audioCodecCtx->channels, srcSamplesCount, audioCodecCtx->sample_fmt, 0);
        if (ret < 0) {
            fprintf(stderr, "av_samples_alloc_array_and_samples() failed: %d\n", ret);
            return ret;
        }

        /* Our input is 16-bit signed samples so we'll need a resampler to convert to FP. */
        swrContext = swr_alloc();
        if (swrContext == NULL) {
            fprintf(stderr, "swr_alloc() failed\n");
            return -1;
        }

        /* Same channel count and rate on both sides; only the sample format
         * changes (S16 -> encoder's FLTP). */
        av_opt_set_int(swrContext, "in_channel_count", audioCodecCtx->channels, 0);
        av_opt_set_int(swrContext, "in_sample_rate", audioCodecCtx->sample_rate, 0);
        av_opt_set_int(swrContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(swrContext, "out_channel_count", audioCodecCtx->channels, 0);
        av_opt_set_int(swrContext, "out_sample_rate", audioCodecCtx->sample_rate, 0);
        av_opt_set_int(swrContext, "out_sample_fmt", audioCodecCtx->sample_fmt, 0);

        ret = swr_init(swrContext);
        if (ret < 0) {
            fprintf(stderr, "swr_init() failed: %d\n", ret);
            return ret;
        }

        maxDstSamplesCount = srcSamplesCount;
        ret = av_samples_alloc_array_and_samples(&dstSamplesData, &dstSamplesLinesize,
            audioCodecCtx->channels, maxDstSamplesCount, audioCodecCtx->sample_fmt, 0);
        if (ret < 0) {
            fprintf(stderr, "av_samples_alloc_array_and_samples() failed: %d\n", ret);
            return ret;
        }

        dstSamplesSize = av_samples_get_buffer_size(NULL, audioCodecCtx->channels,
            maxDstSamplesCount, audioCodecCtx->sample_fmt, 0);
    }

    /* Containers with global headers need the codec-level flag so extradata
     * lands in the container header instead of each keyframe. */
    if (format->flags & AVFMT_GLOBALHEADER) {
        if (videoCodecCtx != NULL) {
            videoCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
        }
        if (audioCodecCtx != NULL) {
            audioCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
        }
    }

    if ((format->flags & AVFMT_NOFILE) == 0) {
        if ((ret = avio_open(&formatContext->pb, fileName, AVIO_FLAG_WRITE)) < 0) {
            fprintf(stderr, "avio_open() failed: %d\n", ret);
            /* FIX: was -4, which collided with the avcodec_find_encoder()
             * error code; use a distinct value. */
            return -6;
        }
    }

    return 0;
}
inline ResampleContext *resample_alloc() { return swr_alloc(); }
int main(int argc, char **argv) { int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND; int src_rate = 48000, dst_rate = 44100; uint8_t **src_data = NULL, **dst_data = NULL; int src_nb_channels = 0, dst_nb_channels = 0; int src_linesize, dst_linesize; int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples; enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16; const char *dst_filename = NULL; FILE *dst_file; int dst_bufsize; const char *fmt; struct SwrContext *swr_ctx; double t; int ret; if (argc != 2) { fprintf(stderr, "Usage: %s output_file\n" "API example program to show how to resample an audio stream with libswresample.\n" "This program generates a series of audio frames, resamples them to a specified " "output format and rate and saves them to an output file named output_file.\n", argv[0]); exit(1); } dst_filename = argv[1]; dst_file = fopen(dst_filename, "wb"); if (!dst_file) { fprintf(stderr, "Could not open destination file %s\n", dst_filename); exit(1); } /* create resampler context */ swr_ctx = swr_alloc(); if (!swr_ctx) { fprintf(stderr, "Could not allocate resampler context\n"); ret = AVERROR(ENOMEM); goto end; } /* set options */ av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0); av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0); av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0); av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0); av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0); av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0); /* initialize the resampling context */ if ((ret = swr_init(swr_ctx)) < 0) { fprintf(stderr, "Failed to initialize the resampling context\n"); goto end; } /* allocate source and destination samples buffers */ src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout); ret = alloc_samples_array_and_data(&src_data, &src_linesize, src_nb_channels, src_nb_samples, 
src_sample_fmt, 0); if (ret < 0) { fprintf(stderr, "Could not allocate source samples\n"); goto end; } /* compute the number of converted samples: buffering is avoided * ensuring that the output buffer will contain at least all the * converted input samples */ max_dst_nb_samples = dst_nb_samples = av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP); /* buffer is going to be directly written to a rawaudio file, no alignment */ dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout); ret = alloc_samples_array_and_data(&dst_data, &dst_linesize, dst_nb_channels, dst_nb_samples, dst_sample_fmt, 0); if (ret < 0) { fprintf(stderr, "Could not allocate destination samples\n"); goto end; } t = 0; do { /* generate synthetic audio */ fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t); /* compute destination number of samples */ dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) + src_nb_samples, dst_rate, src_rate, AV_ROUND_UP); if (dst_nb_samples > max_dst_nb_samples) { av_free(dst_data[0]); ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels, dst_nb_samples, dst_sample_fmt, 1); if (ret < 0) break; max_dst_nb_samples = dst_nb_samples; } /* convert to destination format */ ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples); if (ret < 0) { fprintf(stderr, "Error while converting\n"); goto end; } dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels, ret, dst_sample_fmt, 1); printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret); fwrite(dst_data[0], 1, dst_bufsize, dst_file); } while (t < 10); if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt) < 0)) goto end; fprintf(stderr, "Resampling succeeded. 
Play the output file with the command:\n" "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n", fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename); end: if (dst_file) fclose(dst_file); if (src_data) av_freep(&src_data[0]); av_freep(&src_data); if (dst_data) av_freep(&dst_data[0]); av_freep(&dst_data); swr_free(&swr_ctx); return ret < 0; }