Пример #1
0
// Open the output muxer for `fileName`.
//
// Steps: allocate an AVFormatContext (honouring an explicit container name
// passed via outputOptions["f"], otherwise guessed from the file name),
// add one output stream per entry in `outputParams`, open the underlying
// AVIO file when the container requires one, and write the container header.
//
// Returns true on success. Returns false when the file name is empty, the
// muxer is already open, context allocation fails, any stream fails to be
// added, the file cannot be opened, or the header cannot be written.
bool OutputFormat::open(QString fileName,
                        QMap<QString, OutputParams> outputParams,
                        QVariantMap outputOptions)
{
    if (fileName.isEmpty())
        return false;

    // Already open: refuse to re-open (isOpen() is true here, so this
    // always returns false).
    if (this->isOpen())
        return !this->isOpen();

    AVFormatContext *outputContext = NULL;

    // An explicit "f" option forces the container format (mirrors ffmpeg's
    // -f switch); otherwise FFmpeg guesses it from the file name.
    if (outputOptions.contains("f"))
        avformat_alloc_output_context2(&outputContext,
                                       NULL,
                                       outputOptions["f"].toString().toStdString().c_str(),
                                       fileName.toStdString().c_str());
    else
        avformat_alloc_output_context2(&outputContext,
                                       NULL,
                                       NULL,
                                       fileName.toStdString().c_str());

    this->m_isOpen = false;

    if (!outputContext)
        return this->m_isOpen;

    // The smart pointer owns the context from here on; the custom deleter
    // releases it with the last reference.
    this->m_outputContext = FormatContextPtr(outputContext, CustomDeleters::deleteFormatContext);
    this->m_streams.clear();

    // Create one muxer stream per configured input; abort on the first
    // failure (m_isOpen is still false at this point).
    foreach (QString input, outputParams.keys())
        if (!this->addStream(input, outputParams[input]))
        {
            this->m_streams.clear();

            return this->m_isOpen;
        }

    // Log the resulting output layout (diagnostic only).
    av_dump_format(this->m_outputContext.data(),
                   0,
                   fileName.toStdString().c_str(),
                   1);

    // Containers without AVFMT_NOFILE need a real file opened for writing.
    if (!(this->m_outputContext->oformat->flags & AVFMT_NOFILE))
        if (avio_open(&this->m_outputContext->pb,
                      fileName.toStdString().c_str(),
                      AVIO_FLAG_WRITE) < 0)
            return this->m_isOpen;

    if (avformat_write_header(this->m_outputContext.data(), NULL) < 0)
        return this->m_isOpen;

    return this->m_isOpen = true;
}
Пример #2
0
// Create the output muxer context for `pFileName`, add the container's
// default audio stream, open its encoder and open the output file.
//
// Params:  pFileName   output path (container guessed from the extension,
//                      falling back to MPEG)
//          nBitrate, nSampleRate, nChannels   stored for later encoding
// Returns: S_OK on success; E_HANDLED if a context already exists;
//          E_FAIL if no context can be allocated; E_IO if the file
//          cannot be opened.  Note: the container header is NOT written
//          here — presumably done by a later call; confirm against callers.
int CAudioMuxer::CreateOutputContext(const char* pFileName, int nBitrate, int nSampleRate, int nChannels)
{
	// avoid re-creating the context
	if (m_pCtx) {
		return E_HANDLED;
	}

	/* allocate the output media context */
	avformat_alloc_output_context2(&m_pCtx, NULL, NULL, pFileName);
	if (!m_pCtx) {
		Log("Could not deduce output format from file extension: using MPEG.\n");
		avformat_alloc_output_context2(&m_pCtx, NULL, "mpeg", pFileName);
		if (!m_pCtx) {
			return E_FAIL;
		}
	}
	m_pFmt = m_pCtx->oformat;

	// set the input bitrate, sample rate and channels
	m_nBitrate    = nBitrate;
	m_nSampleRate = nSampleRate;
	m_nChannels   = nChannels;

	/* Add the audio and video streams using the default format codecs
	 * and initialize the codecs. */
	// pAudioCodec is only written by AddAudioStream(); it is read below
	// only when m_pAudioStream is non-NULL, i.e. after that call.
	AVCodec *pAudioCodec;
	m_pAudioStream = NULL;

	AVCodecID eCodecID = m_pFmt->audio_codec;
	if (eCodecID != AV_CODEC_ID_NONE) {
		m_pAudioStream = AddAudioStream(&pAudioCodec, eCodecID);
	}

	/* Now that all the parameters are set, we can open the audio and
	 * video codecs and allocate the necessary encode buffers. */
	if (m_pAudioStream) {
		OpenAudio(pAudioCodec);
	}

	/* open the output file, if needed */
	if (!(m_pFmt->flags & AVFMT_NOFILE)) {
		if (avio_open(&m_pCtx->pb, pFileName, AVIO_FLAG_WRITE) < 0) {
			Log("Could not open '%s'\n", pFileName);
			return E_IO;
		}
	}

	return S_OK;
}
Пример #3
0
OutputContext* output_context_new (const char * filename, const AVFrame * input_frame) {
    int ok = 0;

    // prepare muxer
    AVFormatContext * pfc = NULL;
    avformat_alloc_output_context2(&pfc, NULL, NULL, filename);
    if (!pfc) {
        goto failed;
    }

    // prepare encoding stream
    AVStream * pst = avformat_new_stream(pfc, NULL);
    if (!pst) {
        goto close_muxer;
    }

    // find encoder
    enum AVCodecID codec_id = av_guess_codec(pfc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO);
    if (codec_id == AV_CODEC_ID_NONE) {
        goto close_muxer;
    }
    AVCodec * pc = avcodec_find_encoder(codec_id);
    if (!pc) {
        goto close_muxer;
    }

    // prepare encoder
    AVCodecContext * pcc = avcodec_alloc_context3(pc);
    pcc->pix_fmt = pc->pix_fmts[0];
    pcc->codec_id = pc->id;
    pcc->codec_type = pc->type;
    pcc->time_base.num = 1;
    pcc->time_base.den = 1;
    pcc->width = input_frame->width;
    pcc->height = input_frame->height;
    ok = avcodec_open2(pcc, pc, NULL);
    if (ok != 0) {
        goto free_encoder;
    }

    ok = avcodec_parameters_from_context(pst->codecpar, pcc);
    if (ok < 0) {
        goto free_encoder;
    }

    OutputContext * context = malloc(sizeof(OutputContext));
    context->format_context = pfc;
    context->stream = pst;
    context->codec = pc;
    context->codec_context = pcc;

    return context;

free_encoder:
    avcodec_free_context(&pcc);
close_muxer:
    avformat_free_context(pfc);
failed:
    return NULL;
}
Пример #4
0
/**
 * Open `videofile` for writing and set up a single H.264 video stream with
 * the given geometry, frame rate, GOP size and bitrate, then write the
 * container header.
 * Returns 0 on success, -1 on any failure.
 */
int H264BS2Video::openVideoFile(const char* videofile, int width, int height, int fps, int gop, int bitrate)
{
    int ret = 0;

    av_register_all(); // register muxers/protocols (legacy FFmpeg API)

    avformat_alloc_output_context2(&m_outfctx, NULL, NULL, videofile);
    // BUG FIX: the allocation result was never checked before use.
    if (!m_outfctx)
    {
        debug("avformat_alloc_output_context2 failed.\n");
        return -1;
    }

    m_outstream = avformat_new_stream(m_outfctx, NULL);
    if (!m_outstream)
    {
        debug("avformat_new_stream failed.\n");
        return -1;
    }

    // Note from original author: with the parameters below a valid mp4
    // could not be produced.
    if (m_outfctx->oformat->flags & AVFMT_GLOBALHEADER)
        m_outstream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    m_outstream->codec->codec_id = AV_CODEC_ID_H264;
    m_outstream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    m_outstream->codec->bit_rate = bitrate;
    m_outstream->codec->width = width;
    m_outstream->codec->height = height;

    m_outstream->time_base.num = 1;
    m_outstream->time_base.den = fps;
    //m_outstream->codec->time_base.num = 1;
    //m_outstream->codec->time_base.den = fps;
    m_outstream->codec->gop_size = gop;
    m_outstream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
    m_outstream->codec->max_b_frames = 0;
    m_outstream->r_frame_rate.den = 1;
    m_outstream->r_frame_rate.num = fps;

    // BUG FIX: AVFMT_NOFILE is an AVOutputFormat flag and must be tested
    // against oformat->flags; the original tested m_outfctx->flags, which
    // holds AVFMT_FLAG_* values instead.
    if (!(m_outfctx->oformat->flags & AVFMT_NOFILE))
    {
        if (avio_open(&m_outfctx->pb,videofile,AVIO_FLAG_WRITE)<0)
        {
            debug("avio_open failed.\n");
            return -1;
        }
    }
    if (!m_outfctx->nb_streams)
    {
        debug("nb_streams failed.\n");
        return -1;
    }

    ret = avformat_write_header(m_outfctx, NULL);
    if (ret < 0)
    {
        debug("avformat_write_header failed %d\n", ret);
        return -1;
    }

    m_isfile = 1;

    return 0;
}
Пример #5
0
// Initialise the GIF writer: allocate a "gif" muxer for mOutputFile,
// create a video stream sized from `frame`, open the GIF encoder, open
// the output file and write the container header.
//
// Returns a negative value on failure; on success returns the (>= 0)
// result of avformat_write_header().  All failure paths fall through the
// single `end` label — allocated resources are presumably released by the
// caller/destructor; confirm against the rest of the class.
int GifWriter::init(AVFrame *frame)
{
	int ret = -1;

	// Encoder packet starts empty; filled during encoding later.
	mEncpkt.data = nullptr;
	mEncpkt.size = 0;

	avformat_alloc_output_context2(&mFmtCtxPtr, NULL, "gif", mOutputFile.c_str());
	if (!mFmtCtxPtr) {
		LOGE("Alloc format Context failed");
		goto end;
	}

	LOGD("format: %s", mFmtCtxPtr->oformat->name);

	mStreamPtr = avformat_new_stream(mFmtCtxPtr, NULL);
	if (!mStreamPtr) {
		LOGE("Failed allocating output stream");
        goto end;
	}

	// Configure the encoder via the stream's (legacy) embedded codec
	// context: GIF encoder, RGB8 palette format, 25 fps timebase.
	mCodecCtxPtr             = mStreamPtr->codec;
	mCodecCtxPtr->width      = frame->width;
	mCodecCtxPtr->height     = frame->height;
	mCodecCtxPtr->pix_fmt    = AV_PIX_FMT_RGB8;
	mCodecCtxPtr->codec_id   = AV_CODEC_ID_GIF;
	mCodecCtxPtr->codec_type = AVMEDIA_TYPE_VIDEO;
	mCodecCtxPtr->time_base  = (AVRational){1, 25};

	mCodecPtr = avcodec_find_encoder(mCodecCtxPtr->codec_id);
	if (!mCodecPtr) {
		LOGE("Can't find codec");
		goto end;
	}

	if (avcodec_open2(mCodecCtxPtr, mCodecPtr, nullptr) < 0) {
		LOGE("Can't open codec");
		goto end;
	}

	// The gif muxer writes to a real file; AVFMT_NOFILE would mean there
	// is nothing to open and is treated as an error here.
	if (mFmtCtxPtr->oformat->flags & AVFMT_NOFILE) {
		LOGE("AVFMT_NOFILE");
		goto end;
	}

	ret = avio_open(&mFmtCtxPtr->pb, mOutputFile.c_str(), AVIO_FLAG_WRITE);
	if (ret < 0) {
		LOGE("avio_open failed");
		goto end;
	}

	ret = avformat_write_header(mFmtCtxPtr, NULL);
	if (ret < 0) {
		LOGE("Writer header failed");
		goto end;
	}

end:
	return ret;
}
Пример #6
0
    /**
     * Create the output context for `theFile` using the previously chosen
     * output format `myFormat`.
     *
     * On builds against real FFmpeg the context comes from
     * avformat_alloc_output_context2(); on libav-fork builds
     * (ST_LIBAV_FORK) the same setup is reproduced by hand: allocate the
     * context, attach the format, allocate and initialise the muxer's
     * private data, and copy the file name into the fixed-size
     * Context->filename buffer.
     *
     * @param theFile output file path
     * @return true when the context was allocated
     */
    bool create(const StString& theFile) {
        if(myFormat == NULL) {
            return false;
        }

    #if !defined(ST_LIBAV_FORK)
        avformat_alloc_output_context2(&Context, myFormat, NULL, theFile.toCString());
    #else
        Context = avformat_alloc_context();
        if(Context == NULL) {
            return false;
        }

        Context->oformat = myFormat;
        if(Context->oformat->priv_data_size > 0) {
            Context->priv_data = av_mallocz(Context->oformat->priv_data_size);
            if(!Context->priv_data) {
                // NOTE(review): allocation failure is silently ignored here
                // (original left the nomem path commented out).
                //goto nomem;
            }
            if(Context->oformat->priv_class) {
                // First member of the private data must point at the class.
                *(const AVClass**)Context->priv_data = Context->oformat->priv_class;
                //av_opt_set_defaults(aCtxOut->priv_data);
            }
        } else {
            Context->priv_data = NULL;
        }

        // Copy at most 1024 bytes (buffer size) and force NUL termination.
        const size_t aStrLen = stMin(theFile.Size + 1, size_t(1024));
        stMemCpy(Context->filename, theFile.toCString(), aStrLen);
        Context->filename[1023] = '\0';
    #endif
        return Context != NULL;
    }
/**
 * Create a smart-pointer-wrapped output AVFormatContext for `_Filename`
 * using the (optional) format named `_FormatName`, and open the output
 * file when the container requires one.
 *
 * Throws via DT_THROW_AVERROR on failure; also returns a NULL pointer in
 * case the throw macro falls through.
 */
AVFormatContextPtr dt_av_create_output_format_context(const char * _FormatName, const char * _Filename)
{
    AVOutputFormat * outputFormat = dt_av_guess_format(_FormatName, NULL);
    
    AVFormatContext * outputCtx = NULL;
    int dt_err = avformat_alloc_output_context2(&outputCtx, outputFormat, NULL, _Filename);
    FF_CHECK_ERR(dt_err);

    if (FF_ERR(dt_err))
    {
        DT_THROW_AVERROR( errors::create_file() , dt_err, "avformat_alloc_output_context2");
        return AVFormatContextPtr((AVFormatContext*)NULL);
    }

    // The smart pointer owns the context from here on.
    AVFormatContextPtr outputCtxPtr( outputCtx, details::_AVOutputFormatContextDestruct() );

    // Containers without AVFMT_NOFILE need a real file opened for writing.
    if (!(outputCtx->oformat->flags & AVFMT_NOFILE)) 
    {
        dt_err = avio_open(&outputCtx->pb, _Filename, AVIO_FLAG_WRITE);
        if (FF_ERR(dt_err))
        {
            // BUG FIX: the error was previously reported against
            // "avformat_alloc_output_context2"; it is avio_open that failed.
            DT_THROW_AVERROR( errors::create_file() , dt_err, "avio_open");
            return AVFormatContextPtr((AVFormatContext*)NULL);
        }
    }

    return outputCtxPtr;
}
Пример #8
0
/*
 * Concatenate several input videos into one output file.
 *
 * filelist[1] .. filelist[fileCnt-2] are the inputs and
 * filelist[fileCnt-1] is the output path.  NOTE(review): filelist[0] is
 * never read — presumably argv[0]; confirm against the caller.
 *
 * Returns 0 on success, -1 when the output cannot be opened or the
 * container header cannot be written.
 */
int video_combine(char* filelist[],int fileCnt)
{
	char* outfilename=filelist[fileCnt-1];
	videoFile outVideo;
	videoFile inVideo;
	/* Probe the first input; its parameters seed the output setup below. */
	open_video(filelist[1],&inVideo);
	avformat_alloc_output_context2(&(outVideo.pFormatCtx), NULL, NULL, outfilename);
	AVFormatContext* pFormatCtx=outVideo.pFormatCtx;
	init_output(&inVideo,outfilename,&outVideo);
	av_dump_format(outVideo.pFormatCtx,0,outfilename,1);
    /* Open the output file (message: "failed to open output file"). */
    if (avio_open(&(pFormatCtx->pb),outfilename, AVIO_FLAG_READ_WRITE) < 0)  
    {  
        printf("输出文件打开失败");
        return -1; 
    }
	
	/* Write container header (message: "failed to write header"). */
	if(avformat_write_header(pFormatCtx,NULL)<0){fprintf(stderr,"写入文件头失败\n");return -1;}
	
	int i=0;
	/* Remux every input, in order, into the output. */
	for(i=1;i<fileCnt-1;i++)
	{	
		open_video(filelist[i],&inVideo);
		write_video2(&inVideo,&outVideo);
	}
	
	/* Write trailer (message: "failed to write trailer"). */
	if(av_write_trailer(pFormatCtx)!=0){ERROR("写入文件尾失败\n");}
	avio_close(pFormatCtx->pb);
	avformat_free_context(pFormatCtx);
	return 0;
}
Пример #9
0
/*
 * Initialise the inner WebM muxer used for writing individual chunks.
 *
 * Allocates a child AVFormatContext (wc->avf) for the configured chunk
 * output format, copies interrupt callback, max_delay and metadata from
 * the outer context, configures the child muxer's private options for
 * live DASH chunking, and shares the outer context's stream array with
 * the child (no copy is made — the child points at s->streams directly).
 *
 * Returns 0 on success or a negative AVERROR from the allocation.
 */
static int chunk_mux_init(AVFormatContext *s)
{
    WebMChunkContext *wc = s->priv_data;
    AVFormatContext *oc;
    int ret;

    ret = avformat_alloc_output_context2(&wc->avf, wc->oformat, NULL, NULL);
    if (ret < 0)
        return ret;
    oc = wc->avf;

    oc->interrupt_callback = s->interrupt_callback;
    oc->max_delay          = s->max_delay;
    av_dict_copy(&oc->metadata, s->metadata, 0);

    /* Manually initialise the child muxer's private options: install the
     * AVClass pointer, reset to defaults, then force live-DASH chunking. */
    *(const AVClass**)oc->priv_data = oc->oformat->priv_class;
    av_opt_set_defaults(oc->priv_data);
    av_opt_set_int(oc->priv_data, "dash", 1, 0);
    av_opt_set_int(oc->priv_data, "cluster_time_limit", wc->chunk_duration, 0);
    av_opt_set_int(oc->priv_data, "live", 1, 0);

    /* Share (not copy) the parent's streams with the child context. */
    oc->streams = s->streams;
    oc->nb_streams = s->nb_streams;

    return 0;
}
/*
 * JNI entry point: allocate the global recording context `obj`, create an
 * output muxer + H.264 video stream for the file named by `str`, and open
 * the output for writing.  Returns 0 on success, -1 on failure.
 */
JNIEXPORT jint JNICALL Java_com_jiuan_it_ipc_utils_RtspRecordVideo_start(JNIEnv *env,jobject ob,jstring str)
{
	// BUG FIX: GetStringUTFChars returns a const string that must be
	// paired with ReleaseStringUTFChars; the original leaked it.
	const char *inFilePath = (*env)->GetStringUTFChars(env, str, NULL);
	if (inFilePath == NULL) {
		return -1; /* OutOfMemoryError already pending in the JVM */
	}

	obj = (AV_RecordVideo_Content *)malloc(sizeof(AV_RecordVideo_Content));
	if (obj == NULL) { /* BUG FIX: malloc() result was used unchecked */
		(*env)->ReleaseStringUTFChars(env, str, inFilePath);
		return -1;
	}

	obj->o_fmt_ctx = NULL;
	obj->o_video_stream = NULL;
	obj->last_pts = 0;
	obj->last_dts = 0;

	avcodec_register_all();
	av_register_all();
	int re = avformat_alloc_output_context2(&obj->o_fmt_ctx, NULL, NULL, inFilePath); /* init the output file */
	if (!obj->o_fmt_ctx || re < 0) {
		(*env)->ReleaseStringUTFChars(env, str, inFilePath);
		return -1;
	}

	obj->o_video_stream = avformat_new_stream(obj->o_fmt_ctx, NULL);
	{
		/* Hard-coded 1280x720 H.264 parameters with a 90 kHz timebase. */
		AVCodecContext *c;
		c = obj->o_video_stream->codec;
		c->bit_rate = 400000; //90000
		c->codec_id = AV_CODEC_ID_H264;
		c->codec_type = AVMEDIA_TYPE_VIDEO;
		c->time_base.num = 1;
		c->time_base.den = 90000; //90000
		c->width = 1280;
		c->height = 720;
		c->pix_fmt = AV_PIX_FMT_YUV420P;
		c->flags = 0;
		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
		c->me_range = 0;
		c->max_qdiff = 3;
		c->gop_size = 12;
		c->qmin = 2;
		c->qmax = 31;
		c->qcompress = 0.50000000;
	}

	avio_open(&obj->o_fmt_ctx->pb, inFilePath, AVIO_FLAG_WRITE);
	//avformat_write_header(obj->o_fmt_ctx, NULL);

	/* Done with the Java string — release it (BUG FIX, see above). */
	(*env)->ReleaseStringUTFChars(env, str, inFilePath);

	obj->timebase_1MHz.num = 1;
	obj->timebase_1MHz.den = 1000000;

	obj->timebase_9000Hz.num = 1;
	obj->timebase_9000Hz.den = 90000;

	obj->last_pts = 50000;
	obj->last_dts = 50000;

	obj->IsFirst = 1;

	return 0;
}
Пример #11
0
/* slightly difference scanning function here so can't re-use lookup_default */
struct codec_ent encode_getcontainer(const char* const requested,
	int dst, const char* remote)
{
	AVFormatContext* ctx;
	struct codec_ent res = {0};

	if (requested && strcmp(requested, "stream") == 0){
		res.storage.container.format = av_guess_format("flv", NULL, NULL);

		if (!res.storage.container.format)
			LOG("(encode) couldn't setup streaming output.\n");
		else {
			ctx = avformat_alloc_context();
			ctx->oformat = res.storage.container.format;
			res.storage.container.context = ctx;
			res.setup.muxer = default_format_setup;
			int rv = avio_open2(&ctx->pb, remote, AVIO_FLAG_WRITE, NULL, NULL);
			LOG("(encode) attempting to open: %s, result: %d\n", remote, rv);
		}

		return res;
	}

	if (requested)
		res.storage.container.format = av_guess_format(requested, NULL, NULL);

	if (!res.storage.container.format){
		LOG("(encode) couldn't find a suitable container matching (%s),"
			"	reverting to matroska (MKV)\n", requested);
		res.storage.container.format = av_guess_format("matroska", NULL, NULL);
	} else
		LOG("(encode) requested container (%s) found.\n", requested);

/* no stream, nothing requested that matched and default didn't work.
 * Give up and cascade. */
	if (!res.storage.container.format){
		LOG("(encode) couldn't find a suitable container.\n");
		return res;
	}

	avformat_alloc_output_context2(&ctx, res.storage.container.format,
	NULL, NULL);

/*
 * Since there's no sane way for us to just pass a file descriptor and
 * not be limited to pipe behaviors, we have to provide an entire
 * custom avio class..
 */
	int* fdbuf = malloc(sizeof(int));
	*fdbuf = dst;
	ctx->pb = avio_alloc_context(av_malloc(4096), 4096, 1, fdbuf, fdr, fdw, fds);

	res.storage.container.context = ctx;
	res.setup.muxer = default_format_setup;

	return res;
}
Пример #12
0
/*
 * Allocate an output AVFormatContext for `path` using the muxer named
 * `formatName`.  Returns the context on success or NULL on failure.
 */
AVFormatContext* avFormatContextForOutputPath(const char *path, const char *formatName){
    AVFormatContext *outputFormatContext = NULL; /* BUG FIX: was uninitialized */
    LOGI("avFormatContextForOutputPath format: %s path: %s", formatName, path);
    int openOutputValue = avformat_alloc_output_context2(&outputFormatContext, NULL, formatName, path);
    if (openOutputValue < 0) {
        avformat_free_context(outputFormatContext);
        /* BUG FIX: never return a pointer that was just freed. */
        outputFormatContext = NULL;
    }
    return outputFormatContext;
}
/*
 * Open the encoder's output: allocate the muxer context for the member
 * `filename`/`format_name`, add video and audio streams (unless disabled
 * in g_enc_opt), open the output file when required, and write the
 * container header.
 *
 * Returns 0 on success; -1 on context/stream failures, -8 when the file
 * cannot be opened, -9 when the header cannot be written.
 */
int32_t CEncoder::enc_open(void)
{
	avformat_alloc_output_context2(&oc, NULL, format_name, filename);
	if (oc == NULL)
	{
		av_log(NULL, AV_LOG_ERROR, "Could not find suitable output format:%s(%s)\n", format_name, filename);
		return -1;
	}

	fmt = oc->oformat;
	oc->max_delay = (int32_t)(mux_max_delay * AV_TIME_BASE);
	if (packet_size != NO_VALUE) oc->packet_size = packet_size;
	// NOTE(review): unbounded strcpy into the fixed-size oc->filename
	// buffer — a very long `filename` would overflow; consider av_strlcpy.
	strcpy(oc->filename, filename);

	if (g_enc_opt.m_VideoDisable == 0)
	{
		// No explicit codec requested: use the container's default.
		if ((video_codec_id == CODEC_ID_NONE) && (video_codec_name == NULL))
		{
			video_codec_id = fmt->video_codec;
		}
		video_st = add_video_stream(video_codec_id, video_codec_name);
		if (video_st == NULL) return -1;
	}
	if (g_enc_opt.m_AudioDisable == 0)
	{
		// No explicit codec requested: use the container's default.
		if ((audio_codec_id == CODEC_ID_NONE) && (audio_codec_name == NULL))
		{
			audio_codec_id = fmt->audio_codec;
		}
		audio_st = add_audio_stream(audio_codec_id, audio_codec_name);
		if (audio_st == NULL) return -1;
	}

	// Dump the layout only for containers that actually carry streams.
	if (!(oc->oformat->flags & AVFMT_NOSTREAMS))
	{
		av_dump_format(oc, 0, filename, 1);
	}

	/* open the output file, if needed */
	if (!(oc->oformat->flags & AVFMT_NOFILE))
	{
		if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Could not open '%s'\n", filename);
			return -8;
		}
	}

	/* write the stream header, if any */
	if (avformat_write_header(oc, NULL) != 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Failed to av_write_header !\n");
		return -9;
	}

	return 0;
}
Пример #14
0
////////////////////////////////////////////////////////////////
// Start video capture.
//
// Args:   filename  output file name
//         sw        screen width
//         sh        screen height
//         vrate     frame rate (fps)
//         arate     audio sampling rate (Hz)
//         bpp       colour depth (16,24,32)
// Return: bool      true: success  false: failure
////////////////////////////////////////////////////////////////
bool AVI6::StartAVI( const char *filename, int sw, int sh, int vrate, int arate, int bpp )
{
#ifndef NOAVI
	cCritical::Lock();
	Init();
	
	ABPP = bpp;

	// Create the audio buffer (two frames' worth of samples).
	ABuf.InitBuffer( arate / vrate * 2 );
	
	// Create the output context; the container is guessed from `filename`.
	avformat_alloc_output_context2(&oc, NULL, NULL, filename);
	if (!oc) {
		// BUG FIX: every failure path must release the critical section
		// taken above, otherwise the lock is leaked.
		cCritical::UnLock();
		return false;
	}

	fmt = oc->oformat;

	// Create the audio and video streams.
	if (fmt->video_codec != AV_CODEC_ID_NONE) {
		// Workaround kept from the original: VP9 produced corrupted
		// frames, so force the video codec to VP8.
		fmt->video_codec = AV_CODEC_ID_VP8;
		AddStream(&video_st, oc, &video_codec, fmt->video_codec, sw, sh);
	}
	if (fmt->audio_codec != AV_CODEC_ID_NONE) {
		// Workaround kept from the original: Opus crashed, so force the
		// audio codec to Vorbis.
		fmt->audio_codec = AV_CODEC_ID_VORBIS;
		AddStream(&audio_st, oc, &audio_codec, fmt->audio_codec, sw, sh);
	}

	OpenVideo(oc, video_codec, &video_st, opt);
	OpenAudio(oc, audio_codec, &audio_st, opt, arate);

	av_dump_format(oc, 0, filename, 1);

	int ret = 0;
	// Open the output file when the container requires one.
	if (!(fmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
		if (0 > ret) {
			cCritical::UnLock();	// BUG FIX: lock was leaked here
			return false;
		}
	}

	// Write the stream header.
	ret = avformat_write_header(oc, &opt);
	if (0 > ret) {
		cCritical::UnLock();	// BUG FIX: lock was leaked here
		return false;
	}

	isAVI = true;
	cCritical::UnLock();
	return true;
#else
	return false;
#endif
}
Пример #15
0
/*
 * Initialise the output side of the transcoder: allocate the muxer context
 * for `output_file` (falling back to MPEG when the extension gives no
 * answer), add a PCM audio stream when the container supports audio, reset
 * resampling state and dump the resulting layout.
 *
 * Returns 0 on success; exit()s on unrecoverable failures (matching the
 * original behavior).
 */
int init_output(OUTPUT_CONTEXT *ptr_output_ctx, char* output_file ,INPUT_CONTEXT *ptr_input_ctx){

	//set AVOutputFormat
    /* allocate the output media context */
	printf("output_file = %s \n" ,output_file);
    avformat_alloc_output_context2(&ptr_output_ctx->ptr_format_ctx, NULL, NULL, output_file);
    if (ptr_output_ctx->ptr_format_ctx == NULL) {
        printf("Could not deduce[推断] output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&ptr_output_ctx->ptr_format_ctx, NULL, "mpeg", output_file);
        if(ptr_output_ctx->ptr_format_ctx == NULL){
        	 printf("Could not find suitable output format\n");
        	 exit(NOT_GUESS_OUT_FORMAT);
        }
    }
    /* Once the AVOutputFormat is obtained, its audio_codec and video_codec
     * fields carry the container's default codecs. */
    ptr_output_ctx->fmt = ptr_output_ctx->ptr_format_ctx->oformat;


    /* add audio stream and video stream 	*/
    ptr_output_ctx->audio_stream = NULL;

//    ptr_output_ctx->audio_codec_id = CODEC_ID_MP2; //aac
    ptr_output_ctx->audio_codec_id = AV_CODEC_ID_PCM_S16LE; //pcm

    if (ptr_output_ctx->fmt->audio_codec != CODEC_ID_NONE) {

    	ptr_output_ctx->audio_stream = add_audio_stream(ptr_output_ctx->ptr_format_ctx, ptr_output_ctx->audio_codec_id ,ptr_input_ctx);
    	if(ptr_output_ctx->audio_stream == NULL){
    		printf(".in output ,add audio stream failed \n");
    		exit(ADD_AUDIO_STREAM_FAIL);
    	}
    }



    /*	init some member value */
    ptr_output_ctx->audio_resample = 0;
    ptr_output_ctx->swr = NULL;

    /*output the file information */
    av_dump_format(ptr_output_ctx->ptr_format_ctx, 0, output_file, 1);

    /* BUG FIX: the function is declared int but previously fell off the
     * end without returning a value (undefined behavior in C++, garbage in
     * C).  Report success explicitly. */
    return 0;
}
Пример #16
0
/*
 * List the available sinks of the output device `device`/`device_name`.
 *
 * A temporary muxer context is allocated for the device and handed to
 * list_devices_for_context() together with the device options; the result
 * is stored in *device_list.  Returns 0/positive on success or a negative
 * AVERROR code when the context cannot be allocated.
 */
int avdevice_list_output_sinks(AVOutputFormat *device, const char *device_name,
                               AVDictionary *device_options, AVDeviceInfoList **device_list)
{
    AVFormatContext *ctx = NULL;
    const int err = avformat_alloc_output_context2(&ctx, device, device_name, NULL);

    if (err < 0)
        return err;

    return list_devices_for_context(ctx, device_options, device_list);
}
Пример #17
0
// Open the muxer for writing.
//
// The container may be forced via d->format_forced (set from options);
// output goes either through a user-supplied MediaIO object (custom AVIO,
// no file is opened) or to fileName().  Streams are prepared, the file is
// opened when the container needs one, and the header is written.
// Returns true on success (AV_ENSURE_OK returns false on FFmpeg errors).
bool AVMuxer::open()
{
    // avformatcontext will be allocated in avformat_alloc_output_context2()
    //d->format_ctx->interrupt_callback = *d->interrupt_hanlder;

    d->applyOptionsForDict();
    // check special dict keys
    // d->format_forced can be set from AVFormatContext.format_whitelist
    if (!d->format_forced.isEmpty()) {
        d->format = av_guess_format(d->format_forced.toUtf8().constData(), NULL, NULL);
        qDebug() << "force format: " << d->format_forced;
    }

    //d->interrupt_hanlder->begin(InterruptHandler::Open);
    if (d->io) {
        // Custom IO path: warn on a read-only IO object, allocate the
        // context with an empty file name and install the custom pb.
        if (d->io->accessMode() == MediaIO::Read) {
            qWarning("wrong MediaIO accessMode. MUST be Write");
        }
        AV_ENSURE_OK(avformat_alloc_output_context2(&d->format_ctx, d->format, d->format_forced.isEmpty() ? 0 : d->format_forced.toUtf8().constData(), ""), false);
        d->format_ctx->pb = (AVIOContext*)d->io->avioContext();
        d->format_ctx->flags |= AVFMT_FLAG_CUSTOM_IO;
        //d->format_ctx->flags |= AVFMT_FLAG_GENPTS;
    } else {
        AV_ENSURE_OK(avformat_alloc_output_context2(&d->format_ctx, d->format, d->format_forced.isEmpty() ? 0 : d->format_forced.toUtf8().constData(), fileName().toUtf8().constData()), false);
    }
    //d->interrupt_hanlder->end();

    if (!d->prepareStreams()) {
        return false;
    }
    // TODO: AVFMT_NOFILE ? examples/muxing.c only check AVFMT_NOFILE
    // a custome io does not need avio_open. it open resource in it's own way, e.g. QIODevice.open
    if (!(d->format_ctx->oformat->flags & AVFMT_NOFILE) && !(d->format_ctx->flags & AVFMT_FLAG_CUSTOM_IO)) {
        // avio_open2?
        AV_ENSURE_OK(avio_open(&d->format_ctx->pb, fileName().toUtf8().constData(), AVIO_FLAG_WRITE), false);
    }
    // d->format_ctx->start_time_realtime
    AV_ENSURE_OK(avformat_write_header(d->format_ctx, &d->dict), false);
    d->started = false;

    return true;
}
Пример #18
0
/**
 * Prepare the output sink: validate the stream configuration (exactly one
 * input stream, no output stream names), allocate the output context for
 * `outsink` (optionally forcing container `outformat`) and write the
 * container header.
 *
 * @throws sgraph::sgraphStreamException on bad configuration, failed
 *         context allocation, or a failed header write.
 */
void ffmpegwrite::enter(const std::vector<sgstreamspec*> &in,const std::vector<std::string> &out)
{
	int error;
	av_register_all();
	avdevice_register_all();
	if (in.size()!=1 || out.size()!=0)
		throw(sgraph::sgraphStreamException("outstreams specified"));
	if (this->outformat=="")
		avformat_alloc_output_context2(&this->form_context, NULL, NULL, this->outsink.c_str());
	else
		avformat_alloc_output_context2(&this->form_context, NULL, this->outformat.c_str(), this->outsink.c_str());
	// BUG FIX: a failed allocation left form_context NULL and the
	// avformat_write_header() call below dereferenced it.
	if (this->form_context == NULL)
		throw(sgraph::sgraphStreamException("could not allocate output context"));
	//this->cod_context = this->form_context->streams[video_stream_index]->codec;
	//av_opt_set_int(this->codec, "refcounted_frames", 1, 0);
	/* init the video decoder */

	// NOTE(review): for containers without AVFMT_NOFILE an avio_open() on
	// form_context->pb is normally required before writing the header —
	// confirm the sink is opened elsewhere (e.g. for device outputs).
	error = avformat_write_header(this->form_context, NULL);
	if (error < 0) {
		throw(sgraph::sgraphStreamException("Error occurred when opening output file"));
	}
}
Пример #19
0
// Allocate the output format context for `fileName` using the container
// named by audioFormatList[audioFormat].
//
// Throws ContextCreatorException (MIR_ERR_BADALLOC_CONTEXT) on failure.
void Parser::CreateContext()
{
    int error = 0;

    error = avformat_alloc_output_context2(&fmt_ctx_out, NULL,
                        audioFormatList[audioFormat].c_str(), fileName.c_str());

    if (error < 0)
        throw ContextCreatorException() << errno_code(MIR_ERR_BADALLOC_CONTEXT);

    // NOTE(review): this ORs AVFMT_NOFILE into the *shared* AVOutputFormat
    // structure (fmt_ctx_out->oformat points at a registered, global
    // format), so the flag change affects every context using this muxer —
    // presumably done so a later step skips avio_open for raw output;
    // confirm and consider a per-context flag instead.
    if(audioFormat == AUDIOFORMAT::raw)
        fmt_ctx_out->oformat->flags |= AVFMT_NOFILE;
}
Пример #20
0
// Initialise the live recorder: allocate the output context for
// `lpFileName`, add the container's default video stream (and audio
// stream unless iOnlyVideo != 0), open both codecs, open the output file
// and write the container header.
//
// Params:  lpFileName         output file path
//          lWidth, lHeight    recording dimensions (stored in members)
//          iKeyFrameInterval  used as both video and audio duration value
//          iOnlyVideo         non-zero suppresses the audio stream
// Errors are logged and the function simply returns — callers cannot
// distinguish success from failure (void return).
void CVideoLivRecord::InitRecoder(LPCSTR lpFileName,LONG lWidth,LONG lHeight,INT iKeyFrameInterval,int iOnlyVideo)
{
	m_Width = lWidth;
	m_Height = lHeight;
	m_videoduriation = iKeyFrameInterval;
	m_audioduriation = iKeyFrameInterval;

	int ret = 0;
	char filename[MAX_PATH] = {0};
	memcpy(filename, lpFileName, strlen(lpFileName));
// 	strcat(filename, ".");
// 	strcat(filename, FILE_SUFFIX);

	avformat_alloc_output_context2(&m_pAVFormatContext, NULL, NULL, filename);
	if (!m_pAVFormatContext){
		log("[CVideoLivRecord::InitRecoder] -- avformat_alloc_output_context2() error");
		return ;
	}
	//video
	if (m_pAVFormatContext->oformat->video_codec != AV_CODEC_ID_NONE){
		add_stream(&m_pVideoStream, &m_pVideoCodec, m_pAVFormatContext->oformat->video_codec);
		m_bHasVideo = TRUE;
		m_bEncodeVideo = TRUE;
	}
	//audio (skipped entirely when the caller asked for video only)
	if (iOnlyVideo == 0 && m_pAVFormatContext->oformat->audio_codec != AV_CODEC_ID_NONE){
		add_stream(&m_pAudioStream, &m_pAudioCodec, m_pAVFormatContext->oformat->audio_codec);
		m_bHasAudio = TRUE;
		m_bEncodeAudio = TRUE;
	}
	if (m_bHasVideo){
		open_video(m_pVideoStream, m_pVideoCodec, m_pOpt);
	}
	if (m_bHasAudio){
		open_audio(m_pAudioStream, m_pAudioCodec, m_pOpt);
	}

	// Open the output file when the container requires one.
	if (!(m_pAVFormatContext->oformat->flags & AVFMT_NOFILE)){
		ret = avio_open(&m_pAVFormatContext->pb, filename, AVIO_FLAG_WRITE);
		if (ret < 0){
			log("[CVideoLivRecord::InitRecoder] -- avio_open() error");
			return ;
		}
	}

	ret = avformat_write_header(m_pAVFormatContext, &m_pOpt);
	if (ret < 0){
		log("[CVideoLivRecord::InitRecoder] -- avformat_write_header() error");
		return ;
	}
}
Пример #21
0
/*
 * Initialise the mp4 output: allocate the "mp4" muxer writing to
 * "my.mp4", create an H.264 stream whose geometry is scaled from the
 * decoder context, open the encoder and the file, and write the container
 * header.  exit()s on any failure.
 */
void initmp4(){
		AVCodec * c;

		avformat_alloc_output_context2(&mp4FmtCtx, NULL, "mp4", "my.mp4");
		mp4OutFmt = mp4FmtCtx->oformat;
		c = avcodec_find_encoder(AV_CODEC_ID_H264);
		if (!c){
			printf("Can not find encoder! \n");
			exit(4);
		}
		mp4Stream = avformat_new_stream(mp4FmtCtx, c);
		if(mp4Stream == NULL){
			printf("avformat_new_stream fail\n");
			exit(3);
		}
		/* 25 fps output; geometry = decoded size * SCALE_MULTIPLE. */
		mp4Stream->time_base = (AVRational){1,25};
		mp4Stream->codec->codec_id = AV_CODEC_ID_H264;
		mp4Stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
		mp4Stream->codec->pix_fmt = video_enc_ctx->pix_fmt;
		mp4Stream->codec->width = video_dec_ctx->width * SCALE_MULTIPLE;
		mp4Stream->codec->height = video_dec_ctx->height * SCALE_MULTIPLE;
		mp4Stream->codec->time_base = (AVRational){1,25};
		mp4Stream->codec->gop_size = video_dec_ctx->gop_size ;
		mp4Stream->codec->max_b_frames = video_dec_ctx->max_b_frames;
		mp4Stream->codec->qmin = 15;
		mp4Stream->codec->qmax = 35;
		if(mp4Stream->codec->codec_id == AV_CODEC_ID_H264){
			printf("set priv_data\n");
			av_opt_set(mp4Stream->codec->priv_data, "preset", "slow", 0);
		}
		printf("dump info:\n");            /* BUG FIX: message typo "inof" */
		av_dump_format(mp4FmtCtx, 0, "my.mp4", 1);  /* BUG FIX: was "my.pm4" */

		if (avcodec_open2(mp4Stream->codec, c, NULL) < 0){
			printf("Failed to open encoder! \n");
			exit(5);
		}

		if(avio_open(&mp4FmtCtx->pb, "my.mp4", AVIO_FLAG_READ_WRITE) < 0){
			printf("avio_open my.mp4 fail\n");  /* BUG FIX: was "my.pm4" */
			exit(3);
		}
		if(avformat_write_header(mp4FmtCtx, NULL)<0){
			printf("avformat_write_header fail\n");
			exit(3);
		}
		video_enc_ctx = mp4Stream->codec;

}
Пример #22
0
/**
 * Open an in-memory (custom AVIO) output in container format `fmt` with a
 * single H.264 video stream, and write the container header.  Output bytes
 * go through the writeBuffer/seekBuffer callbacks rather than a file.
 * Returns 0 on success (or when a file output is already open), -1 on
 * failure.
 */
int H264BS2Video::openVideoMem(const char* fmt, int width, int height, int fps, int gop, int bitrate)
{
    int ret = 0;

    if (m_isfile) return 0;

    av_register_all();

    m_avio =avio_alloc_context((unsigned char *)g_szIOBuffer, IO_BUFFER_SIZE, 1,
                &m_avbuffer, NULL, writeBuffer, seekBuffer);

    // The caller-supplied `fmt` selects the container format.
    avformat_alloc_output_context2(&m_outfctx, NULL, fmt, NULL);
    // BUG FIX: the allocation result was never checked before use.
    if (!m_outfctx)
    {
        debug("avformat_alloc_output_context2 failed.\n");
        return -1;
    }
    m_outfctx->pb=m_avio;
    // BUG FIX: plain assignment wiped every other AVFMT_FLAG_* bit that
    // the allocator may have set; OR the custom-IO flag in instead.
    m_outfctx->flags|=AVFMT_FLAG_CUSTOM_IO;
    debug("guess format: %s(%s) flag: %d\n", m_outfctx->oformat->name, m_outfctx->oformat->long_name, m_outfctx->oformat->flags);

    m_outstream = avformat_new_stream(m_outfctx, NULL);
    if (!m_outstream)
    {
        debug("avformat_new_stream failed.\n");
        return -1;
    }
    m_outstream->codec->codec_id = AV_CODEC_ID_H264;
    m_outstream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    m_outstream->codec->bit_rate = bitrate;
    m_outstream->codec->width = width;
    m_outstream->codec->height = height;

    m_outstream->time_base.num = 1;
    m_outstream->time_base.den = fps;
    m_outstream->codec->gop_size = gop;
    m_outstream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
    m_outstream->codec->max_b_frames = 0;

    m_outstream->r_frame_rate.den = 1;
    m_outstream->r_frame_rate.num = fps;

    ret = avformat_write_header(m_outfctx, NULL);
    if (ret < 0)
    {
        debug("avformat_write_header failed %d\n", ret);
        return -1;
    }

    return 0;
}
Пример #23
0
/*
 * OCaml binding: create a muxer context for the file named by the OCaml
 * string `filename_` and wrap it in a custom block.
 *
 * Raises ExnContextAlloc (via raise_if_not) when the allocation fails.
 *
 * NOTE(review): the string payload is accessed with a plain (char*) cast
 * rather than String_val, and `filename_` is dereferenced inside the
 * blocking section at the avformat call — while the runtime lock is
 * released the GC may move the string; confirm this is safe for this
 * runtime configuration (e.g. no compaction) or copy the string before
 * caml_enter_blocking_section().
 */
value
ffmpeg_create(value filename_)
{
  CAMLparam1(filename_);
  CAMLlocal1(ctx);

  av_register_all(); // this is fast to redo

  // Custom block holding a struct Context; keep our own copy of the name.
  ctx = caml_alloc_custom(&context_ops, sizeof(struct Context), 0, 1);
  Context_val(ctx)->filename = strdup((char*) filename_);

  int ret;
  AVFormatContext* fmtCtx;
  caml_enter_blocking_section();
  ret = avformat_alloc_output_context2(&fmtCtx, NULL, NULL, (char*) filename_);
  caml_leave_blocking_section();
  raise_if_not(ret >= 0, ExnContextAlloc, ret);

  Context_val(ctx)->fmtCtx = fmtCtx;
  CAMLreturn(ctx);
}
Пример #24
0
/*视频合成主调用*/
int video_combine2(char* filelist[],int fileCnt)
{
	char* outfilename=filelist[fileCnt-1];
	videoFile outVideo;
	videoFile inVideo;
	open_video(filelist[1],&inVideo);
	avformat_alloc_output_context2(&(outVideo.pFormatCtx), NULL, NULL, outfilename);
	AVFormatContext* pFormatCtx=outVideo.pFormatCtx;
	init_output(&inVideo,outfilename,&outVideo);
	av_dump_format(outVideo.pFormatCtx,0,outfilename,1);
    if (avio_open(&(pFormatCtx->pb),outfilename, AVIO_FLAG_READ_WRITE) < 0)  
    {  
        printf("输出文件打开失败");
        return -1; 
    }
	
	if(avformat_write_header(pFormatCtx,NULL)<0){ERROR("写入文件尾失败");}
	AVFrame* frameArr[10000];
	int index[10]={0};
	int i=0;
	for(i=1;i<fileCnt-1;i++)
	{	
		open_video(filelist[i],&inVideo);
		//printf("%d\n",read_video(&inVideo,frameArr,index[i-1]));
		if(i==1){index[i]=read_video(&inVideo,frameArr,0)-1;}
		else{index[i]=read_video(&inVideo,frameArr,index[i-1]+1)+index[i-1];}
	}
	//STOP;
	//printf("%d %d %d %d\n",index[0],index[1],index[2],index[3]);
	
	write_video3(&outVideo,&inVideo,frameArr,index,fileCnt-2);
	
	if(av_write_trailer(pFormatCtx)!=0){ERROR("写入文件尾失败");}
	avio_close(pFormatCtx->pb);
	avformat_free_context(pFormatCtx);
}
Пример #25
0
/*
 * Build the muxer's AVFormatContext: guess the container from the output
 * file name, allocate the context, create the streams and open the output
 * file.  Returns FFM_SUCCESS or FFM_ERROR (allocated state is released
 * via free_avformat on the failure paths after allocation).
 */
static int ffmpeg_mux_init_context(struct ffmpeg_mux *ffm)
{
	AVOutputFormat *output_format;
	int ret;

	output_format = av_guess_format(NULL, ffm->params.file, NULL);
	if (output_format == NULL) {
		printf("Couldn't find an appropriate muxer for '%s'\n",
				ffm->params.file);
		return FFM_ERROR;
	}

	ret = avformat_alloc_output_context2(&ffm->output, output_format,
			NULL, NULL);
	if (ret < 0) {
		printf("Couldn't initialize output context: %s\n",
				av_err2str(ret));
		return FFM_ERROR;
	}

	/* NOTE(review): this writes into the *shared* registered
	 * AVOutputFormat (oformat points at global format data), clearing the
	 * default codecs for every future user of this muxer in the process —
	 * presumably so init_streams() picks codecs itself; confirm, and note
	 * that newer FFmpeg makes oformat const. */
	ffm->output->oformat->video_codec = AV_CODEC_ID_NONE;
	ffm->output->oformat->audio_codec = AV_CODEC_ID_NONE;

	if (!init_streams(ffm)) {
		free_avformat(ffm);
		return FFM_ERROR;
	}

	ret = open_output_file(ffm);
	if (ret != FFM_SUCCESS) {
		free_avformat(ffm);
		return ret;
	}

	return FFM_SUCCESS;
}
Пример #26
0
/*
 * Initialise the FLV/RTMP encoder pipeline for output `oname`: allocate
 * an "flv" muxer context, add the webcam video stream with the
 * container's default codec, open that codec, open the output (when the
 * container needs a file/URL) and write the stream header.
 *
 * Returns 0 on success, -1 on failure; on failure everything allocated so
 * far is torn down via dinit_encoder().
 */
static int init_encoder(struct liveStream *ctx, const char* oname)
{
	int ret = 0;
	char arr_string[128] = "";
	AVOutputFormat *fmt;
	AVCodec *video_codec = NULL;
	AVStream *vst = NULL;
	AVFormatContext *loc;

	/* allocate the output media context */
	avformat_alloc_output_context2(&ctx->oc, NULL, "flv", oname);
	if (!ctx->oc)
	{
		av_log(NULL, AV_LOG_ERROR, "Could not deduce output format\n");
		ret = -1;
		goto end;
	}
	//save output context in local context
	loc = ctx->oc;

	fmt = loc->oformat;
	if (fmt->video_codec != AV_CODEC_ID_NONE)
	{
		vst = add_webcam_stream(ctx, &video_codec, fmt->video_codec);
	}
	/* No stream means either the container has no default video codec or
	 * add_webcam_stream() failed — both are fatal here. */
	if (!vst)
	{
		ret = -1;
		goto end;
	}

	if(vst)
	{
		/* open the codec */
		ret = avcodec_open2(vst->codec, video_codec, NULL);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Could not open video codec: %s\n",
					av_make_error_string(arr_string, 128, ret));
			ret = -1;
			goto end;
		}
	}

	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE))
	{
		ret = avio_open(&loc->pb, oname, AVIO_FLAG_WRITE);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Could not open '%s': %s\n", oname,
					av_make_error_string(arr_string, 128, ret));
			ret = -1;
			goto end;
		}
	}
	av_dump_format(loc, 0, "Output", 1);
	/* Write the stream header, if any. */
	ret = avformat_write_header(loc, NULL);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Error occurred when writing header: %s\n",
				av_make_error_string(arr_string, 128, ret));
		ret = -1;
		goto end;
	}

end:
        if(ret < 0)  
		dinit_encoder(&ctx->oc);
	return ret;
}
Пример #27
0
/**
 * Demux a source file, re-encode its video (H.264) and audio (AAC) into two
 * elementary-stream files, and optionally remux the source packets into a
 * fourth container file.
 *
 * argv: [1] source file, [2] output video file, [3] output audio file,
 *       [4] optional remux output file.
 * Returns 0 on success, nonzero on failure.
 *
 * BUGFIX: the original unconditionally dereferenced `ofmt`/`ofmt_ctx`
 * (avio_open / avformat_write_header) even when argc == 4, where both are
 * still NULL — guaranteed crash.  That work now lives inside the argc == 5
 * branch.  Also removed the duplicated flush_encoder() call and guarded the
 * cleanup path against contexts that were never created.
 */
int main (int argc, char **argv){
    int ret = 0;
    AVFormatContext *ofmt_ctx = NULL;
    AVOutputFormat *ofmt = NULL;

    /* encoded-audio staging buffer; stays NULL until the encoder is ready */
    uint8_t *sample_buf = NULL;

    if (argc != 4 && argc != 5) {
        fprintf(stderr, "input  1.source file:%s\n"
                "2.output_video\n"
                "3.output_audio\n"
                "4.mux video file(Optional)\n"
                "\n", argv[0]);
        exit(1);
    }

    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];
    //optional mux to any type video
    if(argc == 5){
        out_filename = argv[4];
    }

    /* register all formats and codecs */
    av_register_all();
    //for network stream
    avformat_network_init();

    ret = init_input();
    if(ret){
        goto end;
    }

    ret = init_video_out_context();
    if(ret){
        goto end;
    }

    ret = init_audio_out_context(sample_buf);
    if(ret){
        goto end;
    }else{
        int aud_buffer_size;
        //alloc frame and packet
        AudFrame = av_frame_alloc();
        AudFrame->nb_samples     = AudCodecCtx->frame_size;
        AudFrame->format         = AudCodecCtx->sample_fmt;
        AudFrame->channel_layout = AudCodecCtx->channel_layout;

        aud_buffer_size = av_samples_get_buffer_size(NULL, AudCodecCtx->channels,AudCodecCtx->frame_size,AudCodecCtx->sample_fmt, 1);
        sample_buf = (uint8_t *)av_malloc(aud_buffer_size);
        avcodec_fill_audio_frame(AudFrame, AudCodecCtx->channels, AudCodecCtx->sample_fmt,(const uint8_t*)sample_buf, aud_buffer_size, 1);
        av_new_packet(&AudPkt,aud_buffer_size);
    }

    if(argc == 5){
        //alloc memory
        avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
        if (!ofmt_ctx) {
            printf( "Could not create output context\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        ofmt = ofmt_ctx->oformat;

        ret = init_output(ofmt_ctx);
        if(ret){
            printf("Init output ERROR\n");
            goto end;
        }

        /* open the remux target and write its header — only valid when the
         * optional fourth output was requested (ofmt is NULL otherwise) */
        if (!(ofmt->flags & AVFMT_NOFILE)) {
            ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
            if (ret < 0) {
                printf( "Could not open output file '%s'", out_filename);
                goto end;
            }
        }

        ret = avformat_write_header(ofmt_ctx, NULL);
        if (ret < 0) {
            printf( "Error occurred when opening output file\n");
            goto end;
        }
    }

    //this will fill up by decoder(|read frame|->packet->|decoder|->frame)
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

    //Write video Header
    avformat_write_header(pFormatCtx,NULL);
    //Write audio Header
    avformat_write_header(AudFormatCtx,NULL);

    //alloc packet to get copy from pkt
    av_new_packet(&epkt,picture_size);

    /*setup the convert parameter
     *due to input sample format AV_SAMPLE_FMT_FLTP
     *can't be converted to AV_SAMPLE_FMT_S16
     *which only accepted by the aac encoder
     */
    swr = swr_alloc();
    av_opt_set_int(swr, "in_channel_layout",  audio_dec_ctx->channel_layout, 0);
    av_opt_set_int(swr, "out_channel_layout", AudCodecCtx->channel_layout,  0);
    av_opt_set_int(swr, "in_sample_rate",     audio_dec_ctx->sample_rate, 0);
    av_opt_set_int(swr, "out_sample_rate",    AudCodecCtx->sample_rate, 0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt",  AV_SAMPLE_FMT_FLTP, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16,  0);
    swr_init(swr);

    /*start read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        //do demux & decode -> encode -> output h264 & aac file
        ret = decode_packet();

        if (ret < 0)
            break;
        if(argc == 5){
            remux_packet(ofmt_ctx,&pkt);
        }

        av_free_packet(&pkt);
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;

    //Flush Encoder (the original called this twice with identical arguments)
    ret = flush_encoder(pFormatCtx,0);
    if (ret < 0) {
        printf("Flushing encoder failed\n");
        return -1;
    }

    //Write video trailer
    av_write_trailer(pFormatCtx);

    //Write audio Trailer
    av_write_trailer(AudFormatCtx);

    //Write remux Trailer
    if(argc == 5){
        av_write_trailer(ofmt_ctx);
    }

    printf("Output succeeded!!!!\n");

end:
    //free remux (guarded: ofmt_ctx/ofmt are NULL when no remux was requested
    //or its creation failed)
    if (ofmt_ctx) {
        if (ofmt && !(ofmt->flags & AVFMT_NOFILE))
            avio_close(ofmt_ctx->pb);
        avformat_free_context(ofmt_ctx);
    }

    //free audio
    if (audio_st){
        avcodec_close(audio_st->codec);
        av_free(AudFrame);
        av_free(sample_buf);
    }
    if (AudFormatCtx) {
        avio_close(AudFormatCtx->pb);
        avformat_free_context(AudFormatCtx);
    }

    //free video
    if (video_st){
        avcodec_close(video_st->codec);
        av_free(pFrame);
        av_free(picture_buf);
    }
    if (pFormatCtx) {
        avio_close(pFormatCtx->pb);
        avformat_free_context(pFormatCtx);
    }

    //free decode
    avcodec_close(video_dec_ctx);
    avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_frame_free(&frame);
    return ret < 0;
}
Пример #28
0
/*
 * Create the global output context (ofmt_ctx) for `file_out`, add one video
 * stream encoded with CODEC, open the codec and the output file, and write
 * the container header.  Returns 0 on success or a negative AVERROR code.
 * Relies on file-scope globals: ofmt_ctx, file_out, CODEC, time_base_den,
 * time_base_num, and the helper add_stream().
 */
int open_output_file()
{
	AVStream *outStream = NULL;
	AVStream *inStream = NULL;
	/* NOTE(review): decCtx and `i` are declared but never used here. */
	AVCodecContext *decCtx = NULL, *encCtx = NULL;
	AVOutputFormat *ofmt = NULL;
	AVCodec *encoder = NULL;
	int ret;
	int streamIdx = 0;
	unsigned int i;

	/* Guess the container from the file_out extension. */
	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, file_out);
	if (!ofmt_ctx)
	{
		av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
		return AVERROR_UNKNOWN;
	}

	/* NOTE(review): this mutates the muxer's shared AVOutputFormat struct,
	 * which affects every later user of this output format in the process —
	 * confirm this is intentional. */
	ofmt = ofmt_ctx->oformat;
	ofmt->video_codec = CODEC;

	/* NOTE(review): inStream is always NULL at this point; add_stream()
	 * presumably tolerates (or ignores) a NULL input stream — verify. */
	if (ofmt->video_codec != AV_CODEC_ID_NONE)
		outStream = add_stream(inStream, ofmt->video_codec, &encoder);

	if (outStream)
	{
		encCtx = outStream->codec;
		ret = avcodec_open2(encCtx, encoder, NULL);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Could not open video codec\n");
			return ret;

		}
	}

	av_dump_format(ofmt_ctx, 0, file_out, 1);

	/* Open the actual output file unless the muxer writes nowhere
	 * (AVFMT_NOFILE). */
	if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
	{
		ret = avio_open(&ofmt_ctx->pb, file_out, AVIO_FLAG_WRITE);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", file_out);
			return ret;
		}
	}

	//initialize muxer, write output file header
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
		return ret;
	}

	/* NOTE(review): the time base is assigned *after* the header has been
	 * written; the muxer may have already fixed the stream time base, so
	 * this could have no effect — confirm against the encode loop. */
	ofmt_ctx->streams[streamIdx]->codec->time_base.den = time_base_den;
	ofmt_ctx->streams[streamIdx]->codec->time_base.num = time_base_num;


	return 0;
}
Пример #29
0
/**
 * Open one slave output of the tee muxer.
 *
 * Parses the slave spec (options + filename), creates a child muxer context,
 * maps the selected streams of @p avf into it, opens the output, writes its
 * header, and attaches any per-stream bitstream filters given via "bsfs"
 * options.
 *
 * @param avf       parent (tee) muxer context; source of streams/metadata
 * @param slave     slave specification string ("[opts]filename")
 * @param tee_slave filled with the child context, stream map and bsf array
 * @return 0 on success, negative AVERROR on failure
 *
 * BUGFIXES vs. the original:
 *  - the bsfs array was allocated with element size sizeof(TeeSlave) instead
 *    of the size of its actual (pointer) elements;
 *  - the bad-separator error path returned directly, leaking `options`,
 *    `format` and `select` — it now goes through the common cleanup.
 */
static int open_slave(AVFormatContext *avf, char *slave, TeeSlave *tee_slave)
{
    int i, ret;
    AVDictionary *options = NULL;
    AVDictionaryEntry *entry;
    char *filename;
    char *format = NULL, *select = NULL;
    AVFormatContext *avf2 = NULL;
    AVStream *st, *st2;
    int stream_count;

    if ((ret = parse_slave_options(avf, slave, &options, &filename)) < 0)
        return ret;

/* Move an option's value out of the dict so the dict can be freed without
 * freeing the value we keep. */
#define STEAL_OPTION(option, field) do {                                \
        if ((entry = av_dict_get(options, option, NULL, 0))) {          \
            field = entry->value;                                       \
            entry->value = NULL; /* prevent it from being freed */      \
            av_dict_set(&options, option, NULL, 0);                     \
        }                                                               \
    } while (0)

    STEAL_OPTION("f", format);
    STEAL_OPTION("select", select);

    ret = avformat_alloc_output_context2(&avf2, NULL, format, filename);
    if (ret < 0)
        goto end;
    av_dict_copy(&avf2->metadata, avf->metadata, 0);

    /* parent-stream-index -> slave-stream-index map (-1 = not forwarded) */
    tee_slave->stream_map = av_calloc(avf->nb_streams, sizeof(*tee_slave->stream_map));
    if (!tee_slave->stream_map) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    stream_count = 0;
    for (i = 0; i < avf->nb_streams; i++) {
        st = avf->streams[i];
        if (select) {
            ret = avformat_match_stream_specifier(avf, avf->streams[i], select);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' for output '%s'\n",
                       select, slave);
                goto end;
            }

            if (ret == 0) { /* no match */
                tee_slave->stream_map[i] = -1;
                continue;
            }
        }
        tee_slave->stream_map[i] = stream_count++;

        /* Mirror the parent stream's parameters into the slave stream. */
        if (!(st2 = avformat_new_stream(avf2, NULL))) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        st2->id = st->id;
        st2->r_frame_rate        = st->r_frame_rate;
        st2->time_base           = st->time_base;
        st2->start_time          = st->start_time;
        st2->duration            = st->duration;
        st2->nb_frames           = st->nb_frames;
        st2->disposition         = st->disposition;
        st2->sample_aspect_ratio = st->sample_aspect_ratio;
        st2->avg_frame_rate      = st->avg_frame_rate;
        av_dict_copy(&st2->metadata, st->metadata, 0);
        if ((ret = avcodec_copy_context(st2->codec, st->codec)) < 0)
            goto end;
    }

    if (!(avf2->oformat->flags & AVFMT_NOFILE)) {
        if ((ret = avio_open(&avf2->pb, filename, AVIO_FLAG_WRITE)) < 0) {
            av_log(avf, AV_LOG_ERROR, "Slave '%s': error opening: %s\n",
                   slave, av_err2str(ret));
            goto end;
        }
    }

    if ((ret = avformat_write_header(avf2, &options)) < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error writing header: %s\n",
               slave, av_err2str(ret));
        goto end;
    }

    tee_slave->avf = avf2;
    /* one bitstream-filter chain pointer per slave stream */
    tee_slave->bsfs = av_calloc(avf2->nb_streams, sizeof(*tee_slave->bsfs));
    if (!tee_slave->bsfs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* Attach bitstream filters: "bsfs<sep><spec>" options name a stream
     * specifier after the separator character. */
    entry = NULL;
    while ((entry = av_dict_get(options, "bsfs", NULL, AV_DICT_IGNORE_SUFFIX))) {
        const char *spec = entry->key + strlen("bsfs");
        if (*spec) {
            if (strspn(spec, slave_bsfs_spec_sep) != 1) {
                av_log(avf, AV_LOG_ERROR,
                       "Specifier separator in '%s' is '%c', but only characters '%s' "
                       "are allowed\n", entry->key, *spec, slave_bsfs_spec_sep);
                ret = AVERROR(EINVAL);
                goto end;
            }
            spec++; /* consume separator */
        }

        for (i = 0; i < avf2->nb_streams; i++) {
            ret = avformat_match_stream_specifier(avf2, avf2->streams[i], spec);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' in bsfs option '%s' for slave "
                       "output '%s'\n", spec, entry->key, filename);
                goto end;
            }

            if (ret > 0) {
                av_log(avf, AV_LOG_DEBUG, "spec:%s bsfs:%s matches stream %d of slave "
                       "output '%s'\n", spec, entry->value, i, filename);
                if (tee_slave->bsfs[i]) {
                    av_log(avf, AV_LOG_WARNING,
                           "Duplicate bsfs specification associated to stream %d of slave "
                           "output '%s', filters will be ignored\n", i, filename);
                    continue;
                }
                ret = parse_bsfs(avf, entry->value, &tee_slave->bsfs[i]);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Error parsing bitstream filter sequence '%s' associated to "
                           "stream %d of slave output '%s'\n", entry->value, i, filename);
                    goto end;
                }
            }
        }

        av_dict_set(&options, entry->key, NULL, 0);
    }

    /* Anything left in `options` was never consumed — reject it. */
    if (options) {
        entry = NULL;
        while ((entry = av_dict_get(options, "", entry, AV_DICT_IGNORE_SUFFIX)))
            av_log(avf2, AV_LOG_ERROR, "Unknown option '%s'\n", entry->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto end;
    }

end:
    av_free(format);
    av_free(select);
    av_dict_free(&options);
    return ret;
}
Пример #30
0
//Concatenate segmented H.264 + AAC elementary streams into a single MP4.
/*
 * h264file  - array of `length` H.264 input paths (each entry is char[400])
 * aacfile   - array of `length` AAC input paths, paired with h264file
 * mp4       - output MP4 path
 * length    - number of segment pairs to join
 * usefilter - nonzero to run packets through the "aac_adtstoasc" bitstream
 *             filter (needed for ADTS AAC going into MP4)
 *
 * Returns 0 on success, or a negative code:
 *   -1/-2 open audio/video input failed, -3/-4 stream info failed,
 *   -5 output alloc failed, -6/-7 output stream create/copy failed,
 *   -10 file open or filter failed, -11 header write failed.
 *
 * Segments are processed one pair at a time via the `joinone` label; the
 * running end_*_pts offsets splice each segment's timestamps after the
 * previous one.
 */
int joinmp4(char (*h264file)[400] ,char (*aacfile)[400],char * mp4,int length,int usefilter)
{
	//AVOutputFormat *ofmt = NULL;
	AVPacket pkt;
	AVStream *out_vstream = NULL;
	AVStream *out_astream = NULL;
	AVFormatContext *ofmt_ctx = NULL;
	int join_index = 0;		// index of the segment pair currently being merged
	AVBitStreamFilterContext* aacbsfc = NULL;
	// pts bookkeeping: last_* track the most recent packet end, end_* hold the
	// offset applied to the next segment's timestamps
	long  last_video_pts = 0;
	long last_audio_pts = 0;
	long end_video_pts = 0;
	long end_audio_pts = 0;
	int videoindex_out = -1;
	int audioindex_out = -1;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext * ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL;

    int ret, i,retu =0,filter_ret=0;
    //	int fps;
    int videoindex_v=-1;
    int audioindex_a=-1;
    int frame_index=0;
    int64_t cur_pts_v=0,cur_pts_a=0;
    //set file path
    char *in_filename_v = h264file[join_index];
    char *in_filename_a = aacfile[join_index];
    char *out_filename = mp4;
joinone:
    // per-segment state reset; control jumps back here for every segment pair
    //Input AVFormatContext and Output AVFormatContext
    ifmt_ctx_v = NULL;
    ifmt_ctx_a = NULL;

    ret = 0; i = 0;retu =0;filter_ret=0;
    //	int fps;
    videoindex_v=-1;
    audioindex_a=-1;
    frame_index=0;
    cur_pts_v=0;cur_pts_a=0;
    //set file path
    in_filename_v = h264file[join_index];
    in_filename_a = aacfile[join_index];
    out_filename = mp4;

	//register before use
	av_register_all();
	//open Input and set avformatcontext
	// NOTE(review): on any of the four failures below join_index is NOT
	// incremented, so the trailing `if(join_index < length) goto joinone`
	// retries the same files forever — confirm inputs always exist.
	if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
		retu = -1;//-1 mean audio file opened failed
		
		goto end;
	}
	if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
		retu = -2; //-2 mean video file opened failed
		
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {

		retu = -3; //-3 mean get video info failed
		goto end;
	}


	if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
		retu = -4;//-4 mean get audio info failed
		goto end;
	}

	//open Output
	// the output context is created once, for the first segment only
	if(join_index == 0)
	{
		avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
		if (!ofmt_ctx) {
			retu = -5;
			goto end;
		}
	}

	//ofmt = ofmt_ctx->oformat;
	//find all video stream input type
	for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		if(ifmt_ctx_v->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			AVStream *in_stream = ifmt_ctx_v->streams[i];
			videoindex_v=i;

			if(join_index == 0)
			{
				// first segment: create the output video stream as a copy
				out_vstream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
				videoindex_out=out_vstream->index;
				//Copy the settings of AVCodecContext
				if (avcodec_copy_context(out_vstream->codec, in_stream->codec) < 0) {
					retu = -7;
					goto end;
				}
				out_vstream->codec->codec_tag = 0;
				if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
					out_vstream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			else
			{
				// later segments only extend the accumulated duration
				out_vstream->duration += in_stream->duration;
				//printf("duration = %ld\n",out_vstream->duration);
			}
			if (!out_vstream) {
				retu = -6;
				goto end;
			}
			break;
		}
	}

	//find all audio stream input type
	for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		if(ifmt_ctx_a->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
			AVStream *in_stream = ifmt_ctx_a->streams[i];
			audioindex_a=i;

			if(join_index == 0)
			{
				out_astream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
				audioindex_out=out_astream->index;
				//Copy the settings of AVCodecContext
				if (avcodec_copy_context(out_astream->codec, in_stream->codec) < 0) {
					retu = -7;
					goto end;
				}
				out_astream->codec->codec_tag = 0;
				if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
					out_astream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			else
			{
				out_astream->duration += in_stream->duration;
				//printf("duration = %ld\n",out_astream->duration);
			}
			if (!out_astream) {
				retu = -6;
				goto end;
			}
			break;
		}
	}
	if(join_index == 0)
	{
			//Open output file
		if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
			if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
				retu = -10;
				goto end;
			}
		}
		//Write file header
		if (avformat_write_header(ofmt_ctx, NULL) < 0) {
			retu = -11;
			goto end;
		}
	}
	// lazily create the ADTS->ASC filter once, shared across segments
	if(usefilter&& aacbsfc == NULL)
		aacbsfc = av_bitstream_filter_init("aac_adtstoasc");


	// interleave: always pull from whichever input is behind in time
	while (true) {
		AVFormatContext *ifmt_ctx;
		int stream_index=0;
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		if(av_compare_ts(cur_pts_v,ifmt_ctx_v->streams[videoindex_v]->time_base,cur_pts_a,
					ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0)
		{
			// video is behind (or tied): read the next video packet
			ifmt_ctx=ifmt_ctx_v;
			stream_index=videoindex_out;
			if(av_read_frame(ifmt_ctx, &pkt) >= 0){

				// skip packets from non-video streams until one matches
				do{
					in_stream  = ifmt_ctx->streams[pkt.stream_index];
					out_stream = out_vstream;
					if(pkt.stream_index==videoindex_v){

						//Simple Write PTS
						if(pkt.pts==AV_NOPTS_VALUE){

							//Write PTS
							AVRational time_base1=in_stream->time_base;
							//Duration between 2 frames (us)
							int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
							//Parameters
							pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							pkt.dts=pkt.pts;
							pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							frame_index++;
						}
						cur_pts_v=pkt.pts;
						break;
					}
				}
				while(av_read_frame(ifmt_ctx, &pkt) >= 0);
			}
			else
			{
				// video input exhausted: this segment is done, remember the
				// timestamp offsets for splicing the next one
				//printf("pkt.duration = %ld\n",pkt.duration);
				join_index++;
				end_video_pts = last_video_pts;
				end_audio_pts = last_audio_pts;

					break;
			}
		}
		else
		{
			// audio is behind: read the next audio packet
			ifmt_ctx=ifmt_ctx_a;
			stream_index=audioindex_out;
			if(av_read_frame(ifmt_ctx, &pkt) >= 0){
				do
				{
					in_stream  = ifmt_ctx->streams[pkt.stream_index];
					out_stream = out_astream;
					if(pkt.stream_index==audioindex_a)
					{
						//Simple Write PTS
						if(pkt.pts==AV_NOPTS_VALUE)
						{
							//Write PTS
							AVRational time_base1=in_stream->time_base;
							//Duration between 2 frames (us)
							int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
							//Parameters
							pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							pkt.dts=pkt.pts;
							pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							frame_index++;
						}
						cur_pts_a=pkt.pts;
						break;
					}
				}
				while(av_read_frame(ifmt_ctx, &pkt) >= 0);
			}
			else
			{
				join_index++;
				end_video_pts = last_video_pts;
				end_audio_pts = last_audio_pts;

				break;
			}

		}
		// NOTE(review): when usefilter is set, aac_adtstoasc is applied to
		// EVERY packet read above, including video packets — confirm this is
		// intentional (the filter is normally audio-only).
		if(usefilter)
			filter_ret = av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data,&pkt.size, pkt.data, pkt.size, 0);
		if(filter_ret)
		{
			retu = -10;
			goto end;

		}
		//Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);


		pkt.pos = -1;
		pkt.stream_index=stream_index;
		// shift this segment's timestamps to follow the previous segment
		if(pkt.stream_index == audioindex_out)
		{
			pkt.pts += end_audio_pts;
			pkt.dts += end_audio_pts;
			last_audio_pts = pkt.pts+pkt.duration;
		//	printf("audio pts = %lld ,audio dts = %lld\n",pkt.pts,pkt.dts);
		}
		else
		{
			pkt.pts += end_video_pts;
			pkt.dts += end_video_pts;
			last_video_pts = pkt.pts+pkt.duration;
		}


		//Write
		if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
			av_free_packet(&pkt);
			break;
		}
		//av_packet_unref(&pkt);
			//av_interleaved_write_frame(ofmt_ctx, &pkt);
		av_free_packet(&pkt);
	}


end:


	// avformat_close_input() frees the context and NULLs the pointer, so the
	// avformat_free_context() calls below are no-ops on the success path
	avformat_close_input(&ifmt_ctx_v);
	avformat_close_input(&ifmt_ctx_a);


    avformat_free_context(ifmt_ctx_v);
    avformat_free_context(ifmt_ctx_a);
	if (ret < 0 && ret != AVERROR_EOF) {
	}
	// more segments left: loop back and append the next pair
	if(join_index < length)
		goto joinone;
	
	// NOTE(review): if ofmt_ctx allocation failed (retu == -5) this is reached
	// with ofmt_ctx == NULL — av_write_trailer would dereference NULL; verify
	// callers cannot hit that path.
	av_write_trailer(ofmt_ctx);

	
	if(usefilter)
		av_bitstream_filter_close(aacbsfc);
	/* close output */
	if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	return retu;
}