/* Open the audio codec for an output stream, seed the test-tone signal
 * generator (110 Hz base, sweeping up 110 Hz per second), allocate the
 * native-format and S16 staging frames, and build the S16 -> codec-format
 * resampling context.  Exits the process on any failure. */
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *codec_ctx = ost->st->codec;
    AVDictionary *options = NULL;
    int err;
    int nb_samples;

    /* open the codec with a private copy of the caller's options */
    av_dict_copy(&options, opt_arg, 0);
    err = avcodec_open2(codec_ctx, codec, &options);
    av_dict_free(&options);
    if (err < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(err));
        exit(1);
    }

    /* init signal generator */
    ost->t      = 0;
    ost->tincr  = 2 * M_PI * 110.0 / codec_ctx->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / codec_ctx->sample_rate / codec_ctx->sample_rate;

    nb_samples = (codec_ctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
                     ? 10000
                     : codec_ctx->frame_size;

    /* one frame in the codec's native format plus an S16 staging frame */
    ost->frame     = alloc_audio_frame(codec_ctx->sample_fmt, codec_ctx->channel_layout,
                                       codec_ctx->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, codec_ctx->channel_layout,
                                       codec_ctx->sample_rate, nb_samples);

    /* create resampler context: S16 interleaved in, codec format out,
       identical sample rate and channel count on both sides */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",  codec_ctx->channels,    0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",    codec_ctx->sample_rate, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16,      0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count", codec_ctx->channels,    0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",   codec_ctx->sample_rate, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",    codec_ctx->sample_fmt,  0);

    /* initialize the resampling context */
    if ((err = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}
bool FFMPEGer::open_audio(AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg){ AVCodecContext *c; int nb_samples; int ret; AVDictionary *opt = NULL; c = ost->st->codec; /* open it */ av_dict_copy(&opt, opt_arg, 0); ret = avcodec_open2(c, codec, &opt); av_dict_free(&opt); if (ret < 0) { ALOGE("Could not open audio codec: %s", av_err2str(ret)); return false; } if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) nb_samples = 10000; else nb_samples = c->frame_size; ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout, c->sample_rate, nb_samples); ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout, c->sample_rate, nb_samples); /* create resampler context */ ost->swr_ctx = swr_alloc(); if (!ost->swr_ctx) { ALOGE("Could not allocate resampler context"); return false; } /* set options */ av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0); av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0); av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0); av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0); av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0); /* initialize the resampling context */ ret = swr_init(ost->swr_ctx); if (ret < 0){ ALOGE("Failed to initialize the resampling context"); return false; } return true; }
/* Open the audio codec attached to |st|, seed the test-tone generator
 * members (m_t / m_tincr / m_tincr2: 110 Hz base, sweeping 110 Hz/s),
 * allocate the working frames and build the S16 -> codec-format resampler.
 * Returns FALSE (after logging) on any failure. */
BOOL CVideoLivRecord::open_audio(AVStream *st, AVCodec* codec, AVDictionary* opt)
{
	AVCodecContext* avcc = st->codec;
	AVDictionary* opt_dst = NULL; /* must start out empty (NULL) */
	int nb_samples;
	int ret;

	av_dict_copy(&opt_dst, opt, 0);
	ret = avcodec_open2(avcc, codec, &opt_dst);
	av_dict_free(&opt_dst);
	if (ret < 0){
		log("[CVideoLivRecord::open_audio] -- avcodec_open2() error");
		return FALSE;
	}

	/* test-tone generator state */
	m_t = 0;
	m_tincr = 2 * M_PI * 110.0 / avcc->sample_rate;
	m_tincr2 = 2 * M_PI * 110.0 / avcc->sample_rate / avcc->sample_rate;

	nb_samples = (avcc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
			? 10000
			: avcc->frame_size;

	/* native-format frame plus an S16 staging frame */
	m_pAudioFrame   = alloc_audio_frame(avcc->sample_fmt, avcc->channel_layout,
	                                    avcc->sample_rate, nb_samples);
	m_pAudioBkFrame = alloc_audio_frame(AV_SAMPLE_FMT_S16, avcc->channel_layout,
	                                    avcc->sample_rate, nb_samples);

	m_pAudioSwrctx = swr_alloc();
	if (!m_pAudioSwrctx){
		log("[CVideoLivRecord::open_audio] -- swr_alloc() error");
		return FALSE;
	}

	/* S16 interleaved in, codec sample format out, same rate/channels */
	av_opt_set_int       (m_pAudioSwrctx, "in_channel_count",  avcc->channels,    0);
	av_opt_set_int       (m_pAudioSwrctx, "in_sample_rate",    avcc->sample_rate, 0);
	av_opt_set_sample_fmt(m_pAudioSwrctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
	av_opt_set_int       (m_pAudioSwrctx, "out_channel_count", avcc->channels,    0);
	av_opt_set_int       (m_pAudioSwrctx, "out_sample_rate",   avcc->sample_rate, 0);
	av_opt_set_sample_fmt(m_pAudioSwrctx, "out_sample_fmt",    avcc->sample_fmt,  0);

	if (swr_init(m_pAudioSwrctx) < 0){
		log("[CVideoLivRecord::open_audio] -- swr_init() error");
		return FALSE;
	}
	return TRUE;
}
/* Open the audio codec for |st|, allocate the encode and S16 staging
 * frames, and expose the staging buffer through ffmpegdrv_audio_in so the
 * emulator can write samples directly into it.
 * Returns 0 on success, -1 on failure. */
static int ffmpegdrv_open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c = st->codec;
    AVDictionary *opts = NULL;
    int audio_inbuf_samples;
    int ret;

    /* open codec */
    /* aac encoder ist flaged 'experimental' */
    VICE_P_AV_DICT_SET(&opts, "strict", "experimental", 0);
    ret = VICE_P_AVCODEC_OPEN2(c, avcodecaudio, &opts);
    VICE_P_AV_DICT_FREE(&opts);
    if (ret < 0) {
        log_debug("ffmpegdrv: could not open audio codec (%d)", ret);
        return -1;
    }

    audio_is_open = 1;

    audio_inbuf_samples = (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
                              ? 10000
                              : c->frame_size;

    /* native-format encode frame plus an S16 staging frame */
    audio_st.frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, audio_inbuf_samples);
    audio_st.tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                           c->sample_rate, audio_inbuf_samples);
    if (!audio_st.frame || !audio_st.tmp_frame) {
        return -1;
    }

    /* hand the S16 staging buffer to the audio input machinery */
    ffmpegdrv_audio_in.size = audio_inbuf_samples * c->channels;
    ffmpegdrv_audio_in.buffer = (SWORD *)audio_st.tmp_frame->data[0];
    return 0;
}
/* Open the audio codec already attached to ost->enc, initialize the
 * test-tone signal generator, allocate the encode and staging frames, and
 * copy the codec parameters into the stream for the muxer.
 * Exits the process on any failure.
 *
 * FIX: the return code of avcodec_open2() was previously discarded, so the
 * error message could not say WHY the codec failed to open; keep it and
 * report it via av_err2str(), consistent with the other open_audio variant
 * in this file. */
static void open_audio(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    int nb_samples, ret;

    c = ost->enc;

    /* open it (codec was bound to the context at allocation time) */
    ret = avcodec_open2(c, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "could not open codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    ost->t = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    /* encode frame in the codec's native layout/rate; the staging frame is
       hard-coded S16 stereo 44100 -- presumably the raw input format of
       this pipeline (TODO confirm against the sample generator) */
    ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                   c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO,
                                       44100, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
/* Append the incoming GeglAudioFragment's samples to the pending audio
 * track, then encode and mux as many codec-sized audio frames as are
 * buffered between audio_read_pos and audio_pos.
 *
 * FIXES over the previous version:
 *  - `left * (1<<31)` was undefined behavior (signed left-shift overflow);
 *    on common compilers 1<<31 evaluates to INT_MIN, which inverted the
 *    polarity of S32/S32P output.  Use the floating constant 2147483648.0
 *    (== 2^31) instead.
 *  - av_packet_rescale_ts() ran before the encode return code was checked
 *    and even when no packet was produced; it now runs only on a packet
 *    that was actually emitted.
 *  - av_frame_make_writable() was called AFTER the frame data had already
 *    been filled in; it is now called before writing. */
void write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c = st->codec;
  int sample_count = 100000; /* cap for variable-frame-size codecs */
  static AVPacket pkt = { 0 };

  if (pkt.size == 0)
    {
      av_init_packet (&pkt);
    }

  /* first we add incoming frames audio samples */
  {
    int i;
    /* renamed from the shadowing `sample_count` of the old code */
    int incoming_samples = gegl_audio_fragment_get_sample_count (o->audio);
    GeglAudioFragment *af =
      gegl_audio_fragment_new (gegl_audio_fragment_get_sample_rate (o->audio),
                               gegl_audio_fragment_get_channels (o->audio),
                               gegl_audio_fragment_get_channel_layout (o->audio),
                               incoming_samples);
    gegl_audio_fragment_set_sample_count (af, incoming_samples);
    for (i = 0; i < incoming_samples; i++)
      {
        af->data[0][i] = o->audio->data[0][i];
        af->data[1][i] = o->audio->data[1][i];
      }
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    p->audio_pos += incoming_samples;
    p->audio_track = g_list_append (p->audio_track, af);
  }

  if (!(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    sample_count = c->frame_size;

  /* then we encode as much as we can in a loop using the codec frame size */
  while (p->audio_pos - p->audio_read_pos > sample_count)
    {
      long i;
      int ret;
      int got_packet = 0;
      AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                          c->sample_rate, sample_count);

      /* ensure the buffer is writable BEFORE filling it */
      av_frame_make_writable (frame);

      switch (c->sample_fmt)
        {
        case AV_SAMPLE_FMT_FLT:
          for (i = 0; i < sample_count; i++)
            {
              float left = 0, right = 0;
              get_sample_data (p, i + p->audio_read_pos, &left, &right);
              ((float*)frame->data[0])[c->channels*i+0] = left;
              ((float*)frame->data[0])[c->channels*i+1] = right;
            }
          break;
        case AV_SAMPLE_FMT_FLTP:
          for (i = 0; i < sample_count; i++)
            {
              float left = 0, right = 0;
              get_sample_data (p, i + p->audio_read_pos, &left, &right);
              ((float*)frame->data[0])[i] = left;
              ((float*)frame->data[1])[i] = right;
            }
          break;
        case AV_SAMPLE_FMT_S16:
          for (i = 0; i < sample_count; i++)
            {
              float left = 0, right = 0;
              get_sample_data (p, i + p->audio_read_pos, &left, &right);
              ((int16_t*)frame->data[0])[c->channels*i+0] = left  * (1<<15);
              ((int16_t*)frame->data[0])[c->channels*i+1] = right * (1<<15);
            }
          break;
        case AV_SAMPLE_FMT_S32:
          for (i = 0; i < sample_count; i++)
            {
              float left = 0, right = 0;
              get_sample_data (p, i + p->audio_read_pos, &left, &right);
              /* 2147483648.0 == 2^31; (1<<31) would be UB */
              ((int32_t*)frame->data[0])[c->channels*i+0] = left  * 2147483648.0;
              ((int32_t*)frame->data[0])[c->channels*i+1] = right * 2147483648.0;
            }
          break;
        case AV_SAMPLE_FMT_S32P:
          for (i = 0; i < sample_count; i++)
            {
              float left = 0, right = 0;
              get_sample_data (p, i + p->audio_read_pos, &left, &right);
              ((int32_t*)frame->data[0])[i] = left  * 2147483648.0;
              ((int32_t*)frame->data[1])[i] = right * 2147483648.0;
            }
          break;
        case AV_SAMPLE_FMT_S16P:
          for (i = 0; i < sample_count; i++)
            {
              float left = 0, right = 0;
              get_sample_data (p, i + p->audio_read_pos, &left, &right);
              ((int16_t*)frame->data[0])[i] = left  * (1<<15);
              ((int16_t*)frame->data[1])[i] = right * (1<<15);
            }
          break;
        default:
          fprintf (stderr, "eeeek unhandled audio format\n");
          break;
        }

      frame->pts = p->next_apts;
      p->next_apts += sample_count;

      ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);
      if (ret < 0)
        {
          fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
        }

      if (got_packet)
        {
          /* rescale only packets that were actually produced */
          av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
          pkt.stream_index = st->index;
          av_interleaved_write_frame (oc, &pkt);
          av_free_packet (&pkt);
        }

      av_frame_free (&frame);
      p->audio_read_pos += sample_count;
    }
}