Example #1
int frame_puller_open_audio(frame_puller **o_fp, const char *path, int output_sample_rate)
{
    *o_fp = NULL;
    int ret;
    frame_puller *fp;

    if ((ret = _frame_puller_new(&fp, path)) < 0) return ret;
    fp->type = FRAME_PULLER_AUDIO;
    if ((ret = _frame_puller_init(fp, AVMEDIA_TYPE_AUDIO)) < 0) return ret;
    fp->output_sample_rate = output_sample_rate > 0 ? output_sample_rate : fp->codec_ctx->sample_rate;
    fp->sample_scale_rate = (double)fp->output_sample_rate / (double)fp->codec_ctx->sample_rate;
    // Initialize the libswresample context for audio resampling.
    // > Create the buffer for the converted frame to store data
    fp->frame = av_frame_alloc();
    fp->frame->format = AV_SAMPLE_FMT_S16P;
    fp->frame->channel_layout = fp->codec_ctx->channel_layout;
    fp->frame->sample_rate = fp->output_sample_rate;
    if ((fp->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) || !strcmp(fp->codec->name, "pcm_mulaw"))
        fp->frame->nb_samples = 4096;
    else fp->frame->nb_samples = fp->sample_scale_rate * fp->codec_ctx->frame_size;
    av_log(NULL, AV_LOG_INFO, "frame_puller: number of samples per frame = %d\n", fp->frame->nb_samples);
    if ((ret = av_frame_get_buffer(fp->frame, 0)) < 0) return ret;
    // > Create the SwrContext
    fp->libsw.swr_ctx = swr_alloc();
    if (!fp->libsw.swr_ctx) {
        av_log(NULL, AV_LOG_ERROR, "frame_puller: Cannot initialize audio resampling library "
            "(possibly caused by insufficient memory)\n");
        return AVERROR_UNKNOWN;
    }
    // > Provide options for the SwrContext
    av_opt_set_channel_layout(fp->libsw.swr_ctx, "in_channel_layout", fp->codec_ctx->channel_layout, 0);
    av_opt_set_channel_layout(fp->libsw.swr_ctx, "out_channel_layout", fp->codec_ctx->channel_layout, 0);
    av_opt_set_int(fp->libsw.swr_ctx, "in_sample_rate", fp->codec_ctx->sample_rate, 0);
    av_opt_set_int(fp->libsw.swr_ctx, "out_sample_rate", fp->output_sample_rate, 0);
    av_opt_set_sample_fmt(fp->libsw.swr_ctx, "in_sample_fmt", fp->codec_ctx->sample_fmt, 0);
    av_opt_set_sample_fmt(fp->libsw.swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16P, 0);
    // > Fully initialize the SwrContext
    if ((ret = swr_init(fp->libsw.swr_ctx)) < 0) return ret;

    // For use in @ref frame_puller_last_time.
    fp->frame->pts = -233333;

    *o_fp = fp;
    return 0;
}
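The option-by-option SwrContext setup above can also be written more compactly. Below is a minimal sketch, assuming the same legacy (pre-AVChannelLayout) libswresample API generation as the example: swr_alloc_set_opts() takes all input and output parameters in one call. The helper name setup_resampler() is illustrative and not part of the original code.

#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>

/* Sketch: the same resampler configuration in a single call. Output is planar
 * signed 16-bit at output_sample_rate; input matches the decoder context. */
static int setup_resampler(SwrContext **out_swr, const AVCodecContext *dec_ctx,
                           int output_sample_rate)
{
    SwrContext *swr = swr_alloc_set_opts(NULL,
        dec_ctx->channel_layout, AV_SAMPLE_FMT_S16P, output_sample_rate,    /* output */
        dec_ctx->channel_layout, dec_ctx->sample_fmt, dec_ctx->sample_rate, /* input  */
        0, NULL);
    if (!swr) return AVERROR(ENOMEM);
    int ret = swr_init(swr);
    if (ret < 0) swr_free(&swr);
    else *out_swr = swr;
    return ret;
}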
Example #2
void ensureAudioPostProcess()
{
    if (host.state < CodecBoxDecoderState::Metadata) return;
    if (swr) return;
    // Lazily create the resampler once stream metadata is known.
    swr = swr_alloc();
    // JIF(cond, msg) presumably logs msg and jumps to err: when cond is non-zero.
    JIF(!swr, "failed to alloc audio resampler.");
    // Output: interleaved float stereo at the host's sample rate.
    av_opt_set_channel_layout(swr, "in_channel_layout", audioCodecContext->channel_layout, 0);
    av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(swr, "in_sample_rate", audioCodecContext->sample_rate, 0);
    av_opt_set_int(swr, "out_sample_rate", host.sampleRate, 0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt", audioCodecContext->sample_fmt, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
    JIF(swr_init(swr), "failed to init audio resampler.");
    return;
err:
    host.state = CodecBoxDecoderState::Failed;
    close();
}
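After swr_init() succeeds, each decoded frame still has to be pushed through the resampler. Below is a minimal sketch of that step, assuming the output format chosen above (interleaved AV_SAMPLE_FMT_FLT) and a libswresample new enough to provide swr_get_out_samples(); the function convertFrame() and its buffer arguments are illustrative, not part of the original class.

#include <libavutil/frame.h>
#include <libswresample/swresample.h>

/* Sketch: convert one decoded frame into a caller-provided interleaved
 * float buffer. Returns the number of samples written per channel, or <0. */
static int convertFrame(SwrContext *swr, const AVFrame *decoded,
                        uint8_t *outBuf, int maxOutSamples)
{
    /* swr_get_out_samples() accounts for samples buffered inside the
     * resampler and for the sample-rate change. */
    int wanted = swr_get_out_samples(swr, decoded->nb_samples);
    if (wanted > maxOutSamples) wanted = maxOutSamples;
    return swr_convert(swr, &outBuf, wanted,
                       (const uint8_t **)decoded->extended_data,
                       decoded->nb_samples);
}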
Example #3
void FFMS_AudioSource::SetOutputFormat(FFMS_ResampleOptions const& opt) {
	if (opt.SampleRate != AP.SampleRate)
		throw FFMS_Exception(FFMS_ERROR_RESAMPLING, FFMS_ERROR_UNSUPPORTED,
			"Sample rate changes are currently unsupported.");

#ifndef FFMS_RESAMPLING_ENABLED
	if (opt.SampleFormat != AP.SampleFormat || opt.SampleRate != AP.SampleRate || opt.ChannelLayout != AP.ChannelLayout)
		throw FFMS_Exception(FFMS_ERROR_RESAMPLING, FFMS_ERROR_UNSUPPORTED,
			"FFMS was not built with resampling enabled. The only supported conversion is interleaving planar audio.");
#endif
#ifdef WITH_AVRESAMPLE
	if (opt.SampleFormat != AP.SampleFormat || opt.ChannelLayout != AP.ChannelLayout)
		throw FFMS_Exception(FFMS_ERROR_RESAMPLING, FFMS_ERROR_UNSUPPORTED,
			"FFMS was not built with FFMPEG resampling enabled.");
#endif

	// Cache stores audio in the output format, so clear it and reopen the file
	Cache.clear();
	PacketNumber = 0;
	ReopenFile();
	FlushBuffers(CodecContext);

	BytesPerSample = av_get_bytes_per_sample(static_cast<AVSampleFormat>(opt.SampleFormat)) * av_get_channel_layout_nb_channels(opt.ChannelLayout);
	NeedsResample =
		opt.SampleFormat != (int)CodecContext->sample_fmt ||
		opt.SampleRate != AP.SampleRate ||
		opt.ChannelLayout != AP.ChannelLayout ||
		opt.ForceResample;

#ifdef FFMS_RESAMPLING_ENABLED
	if (!NeedsResample) return;

	FFResampleContext newContext;
	SetOptions(opt, newContext, resample_options);
	av_opt_set_int(newContext, "in_sample_rate", AP.SampleRate, 0);
	av_opt_set_int(newContext, "in_sample_fmt", CodecContext->sample_fmt, 0);
	av_opt_set_int(newContext, "in_channel_layout", AP.ChannelLayout, 0);

	av_opt_set_int(newContext, "out_sample_rate", opt.SampleRate, 0);

#ifdef WITH_SWRESAMPLE
	av_opt_set_channel_layout(newContext, "out_channel_layout", opt.ChannelLayout, 0);
	av_opt_set_sample_fmt(newContext, "out_sample_fmt", (AVSampleFormat)opt.SampleFormat, 0);
#endif

	if (ffms_open(newContext))
		throw FFMS_Exception(FFMS_ERROR_RESAMPLING, FFMS_ERROR_UNKNOWN,
			"Could not open avresample context");
	newContext.swap(ResampleContext);
#endif
}
Example #4
int frame_pusher_open(frame_pusher **o_fp, const char *path,
    int aud_samplerate, AVRational vid_framerate,
    int width, int height, int vid_bitrate)
{
    *o_fp = NULL;
    int ret;
    frame_pusher *fp = (frame_pusher *)av_malloc(sizeof(frame_pusher));

    // Guess the format
    AVOutputFormat *ofmt = av_guess_format(NULL, path, NULL);
    if (!ofmt) {
        ofmt = av_oformat_next(NULL);   // Use the first format available
        av_log(NULL, AV_LOG_WARNING, "Unsupported container format. Using %s instead.\n", ofmt->name);
        // TODO: Add the extension to the path.
    }
    av_log(NULL, AV_LOG_INFO, "Using format %s\n", ofmt->name);
    // Open output file
    AVIOContext *io_ctx;
    if ((ret = avio_open2(&io_ctx, path, AVIO_FLAG_WRITE, NULL, NULL)) < 0) return ret;
    // Create the format context
    fp->fmt_ctx = avformat_alloc_context();
    fp->fmt_ctx->oformat = ofmt;
    fp->fmt_ctx->pb = io_ctx;
    // > Create the streams. Here we simply create one for video and one for audio.
    // >> The audio stream
    AVCodec *aud_codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    fp->aud_stream = avformat_new_stream(fp->fmt_ctx, aud_codec);
    fp->aud_stream->id = 0;
    fp->aud_stream->codec->codec_id = AV_CODEC_ID_AAC;
    fp->aud_stream->codec->bit_rate = 64000;
    fp->aud_stream->codec->sample_rate = fp->aud_samplerate = aud_samplerate;
    // >>> http://stackoverflow.com/questions/22989838
    // >>> TODO: Add an option to set the codec and the sample format.
    fp->aud_stream->codec->sample_fmt = fp->aud_stream->codec->codec->sample_fmts[0];
    fp->aud_stream->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    fp->aud_stream->codec->channels = 2;
    fp->aud_stream->codec->time_base = fp->aud_stream->time_base = (AVRational){1, aud_samplerate};
    // >> The video stream
    AVCodec *vid_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    fp->vid_stream = avformat_new_stream(fp->fmt_ctx, vid_codec);
    fp->vid_width = fp->vid_stream->codec->width = width;
    fp->vid_height = fp->vid_stream->codec->height = height;
    fp->vid_stream->id = 1;
    // >>> * ATTENTION: fp->vid_stream->codec is an (AVCodecContext *) rather than (AVCodec *)!
    fp->vid_stream->codec->codec_id = AV_CODEC_ID_H264;
    fp->vid_stream->codec->bit_rate = vid_bitrate > 0 ? vid_bitrate : 1200000;
    fp->vid_stream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
    fp->vid_stream->codec->gop_size = 24;
    fp->vid_stream->codec->time_base = fp->vid_stream->time_base = (AVRational){vid_framerate.den, vid_framerate.num};
    // >> Enable experimental codecs such as AAC
    fp->aud_stream->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    fp->vid_stream->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    // >> Some formats want stream headers to be separate.
    // >> XXX: MPEG-4 doesn't have AVFMT_GLOBALHEADER in its format flags??
    //if (fp->fmt_ctx->flags & AVFMT_GLOBALHEADER)
        fp->aud_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        fp->vid_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if ((ret = avcodec_open2(fp->aud_stream->codec, aud_codec, NULL)) < 0) return ret;
    if ((ret = avcodec_open2(fp->vid_stream->codec, vid_codec, NULL)) < 0) return ret;
    // Trigger a full initialization on the format context and write the header.
    avformat_write_header(fp->fmt_ctx, NULL);

    // Miscellaneous initializations
    fp->first_packet = 1;
    fp->last_aud_pts = fp->last_vid_pts = 0;
    fp->nb_aud_buffered_samples = 0;
    // > Video
    fp->vid_frame = av_frame_alloc();
    fp->pict_bufsize = avpicture_get_size(AV_PIX_FMT_YUV420P, width, height);
    fp->pict_buf = (uint8_t *)av_malloc(fp->pict_bufsize);
    // >> Assign the video frame with the allocated buffer
    avpicture_fill((AVPicture *)fp->vid_frame, fp->pict_buf, AV_PIX_FMT_YUV420P, width, height);
    fp->sws_ctx = sws_getContext(
        width, height, AV_PIX_FMT_RGB24, width, height, AV_PIX_FMT_YUV420P,
        SWS_BILINEAR, NULL, NULL, NULL);
    // > Audio
    fp->aud_frame = av_frame_alloc();
    fp->aud_buf = av_frame_alloc();
    fp->aud_buf->format = fp->aud_frame->format = fp->aud_stream->codec->sample_fmt;
    fp->aud_buf->channel_layout = fp->aud_frame->channel_layout = fp->aud_stream->codec->channel_layout;
    fp->aud_buf->sample_rate = fp->aud_frame->sample_rate = fp->aud_stream->codec->sample_rate;
    if (aud_codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) {
        fp->nb_aud_samples_per_frame = 4096;
        av_log(NULL, AV_LOG_INFO, "frame_pusher: codec has variable frame size capability\n");
    } else fp->nb_aud_samples_per_frame = fp->aud_stream->codec->frame_size;
    fp->aud_buf->nb_samples = fp->aud_frame->nb_samples = fp->nb_aud_samples_per_frame;
    av_log(NULL, AV_LOG_INFO, "frame_pusher: number of samples per frame = %d\n", fp->nb_aud_samples_per_frame);
    if ((ret = av_frame_get_buffer(fp->aud_frame, 0)) < 0) return ret;
    if ((ret = av_frame_get_buffer(fp->aud_buf, 0)) < 0) return ret;
    // >> The audio resampling context
    fp->swr_ctx = swr_alloc();
    if (!fp->swr_ctx) {
        av_log(NULL, AV_LOG_ERROR, "frame_pusher: Cannot initialize audio resampling library "
            "(possibly caused by insufficient memory)\n");
        return AVERROR_UNKNOWN;
    }
    av_opt_set_channel_layout(fp->swr_ctx, "in_channel_layout", fp->aud_stream->codec->channel_layout, 0);
    av_opt_set_channel_layout(fp->swr_ctx, "out_channel_layout", fp->aud_stream->codec->channel_layout, 0);
    av_opt_set_int(fp->swr_ctx, "in_sample_rate", fp->aud_stream->codec->sample_rate, 0);
    av_opt_set_int(fp->swr_ctx, "out_sample_rate", fp->aud_stream->codec->sample_rate, 0);
    av_opt_set_sample_fmt(fp->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    av_opt_set_sample_fmt(fp->swr_ctx, "out_sample_fmt", fp->aud_stream->codec->sample_fmt, 0);
    if ((ret = swr_init(fp->swr_ctx)) < 0) return ret;

    *o_fp = fp;
    return 0;
}
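Example #4 configures the encoders through AVStream.codec, which matches the FFmpeg generation of the surrounding calls (avpicture_fill, CODEC_FLAG_GLOBAL_HEADER) but is deprecated in current releases. Below is a minimal sketch of the newer pattern for the video stream, assuming FFmpeg 3.1 or later; open_video_encoder() is an illustrative helper, not part of the original code.

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Sketch: keep a dedicated encoder context and copy its parameters into the
 * stream (codecpar) before avformat_write_header(). */
static int open_video_encoder(AVFormatContext *fmt_ctx, AVStream *vid_stream,
                              const AVCodec *vid_codec, int width, int height,
                              AVRational vid_framerate, int vid_bitrate,
                              AVCodecContext **out_enc)
{
    int ret;
    AVCodecContext *enc = avcodec_alloc_context3(vid_codec);
    if (!enc) return AVERROR(ENOMEM);
    enc->width = width;
    enc->height = height;
    enc->bit_rate = vid_bitrate > 0 ? vid_bitrate : 1200000;
    enc->pix_fmt = AV_PIX_FMT_YUV420P;
    enc->gop_size = 24;
    enc->time_base = (AVRational){vid_framerate.den, vid_framerate.num};
    if (fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    if ((ret = avcodec_open2(enc, vid_codec, NULL)) < 0 ||
        (ret = avcodec_parameters_from_context(vid_stream->codecpar, enc)) < 0) {
        avcodec_free_context(&enc);
        return ret;
    }
    *out_enc = enc;
    return 0;
}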