int main(int argc, char* argv[])
{
	AVFormatContext *ifmt_ctx=NULL;
	AVFormatContext *ofmt_ctx;
	AVInputFormat* ifmt;
	AVStream* video_st;
	AVCodecContext* pCodecCtx;
	AVCodec* pCodec;
	AVPacket *dec_pkt, enc_pkt;
	AVFrame *pframe, *pFrameYUV;
	struct SwsContext *img_convert_ctx;

	char capture_name[80] = {0};
	char device_name[80] = {0};
	int framecnt=0;
	int videoindex;
	int i;
	int ret;
	HANDLE  hThread;

	const char* out_path = "rtmp://localhost/live/livestream";	 
	int dec_got_frame,enc_got_frame;

	av_register_all();
	//Register Device
	avdevice_register_all();
	avformat_network_init();
	
	//Show Dshow Device  
	show_dshow_device();
	
	printf("\nChoose capture device: ");
	if (fgets(capture_name, sizeof(capture_name), stdin) == NULL)
	{
		printf("Error reading capture device name\n");
		return -1;
	}
	capture_name[strcspn(capture_name, "\r\n")] = '\0'; //strip the trailing newline
	sprintf(device_name, "video=%s", capture_name);

	ifmt=av_find_input_format("dshow");
	
	//Set own video device's name
	if (avformat_open_input(&ifmt_ctx, device_name, ifmt, NULL) != 0){
		printf("Couldn't open input stream.(无法打开输入流)\n");
		return -1;
	}
	//input initialize
	if (avformat_find_stream_info(ifmt_ctx, NULL)<0)
	{
		printf("Couldn't find stream information.(无法获取流信息)\n");
		return -1;
	}
	videoindex = -1;
	for (i = 0; i<ifmt_ctx->nb_streams; i++)
		if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			videoindex = i;
			break;
		}
	if (videoindex == -1)
	{
		printf("Couldn't find a video stream.(没有找到视频流)\n");
		return -1;
	}
	if (avcodec_open2(ifmt_ctx->streams[videoindex]->codec, avcodec_find_decoder(ifmt_ctx->streams[videoindex]->codec->codec_id), NULL)<0)
	{
		printf("Could not open codec.(无法打开解码器)\n");
		return -1;
	}

	//output initialize
	avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path);
	//output encoder initialize
	pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!pCodec){
		printf("Can not find encoder! (没有找到合适的编码器!)\n");
		return -1;
	}
	pCodecCtx=avcodec_alloc_context3(pCodec);
	pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
	pCodecCtx->width = ifmt_ctx->streams[videoindex]->codec->width;
	pCodecCtx->height = ifmt_ctx->streams[videoindex]->codec->height;
	pCodecCtx->time_base.num = 1;
	pCodecCtx->time_base.den = 25;
	pCodecCtx->bit_rate = 400000;
	pCodecCtx->gop_size = 250;
	/* Some formats want stream headers to be separate. */
	if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
		pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

	//H264 codec param
	//pCodecCtx->me_range = 16;
	//pCodecCtx->max_qdiff = 4;
	//pCodecCtx->qcompress = 0.6;
	pCodecCtx->qmin = 10;
	pCodecCtx->qmax = 51;
	//Optional Param
	pCodecCtx->max_b_frames = 3;
	// Set H264 preset and tune
	AVDictionary *param = 0;
	av_dict_set(&param, "preset", "fast", 0);
	av_dict_set(&param, "tune", "zerolatency", 0);

	if (avcodec_open2(pCodecCtx, pCodec,&param) < 0){
		printf("Failed to open encoder! (编码器打开失败!)\n");
		return -1;
	}

	//Add a new stream to the output; must be called before avformat_write_header() when muxing
	video_st = avformat_new_stream(ofmt_ctx, pCodec);
	if (video_st == NULL){
		return -1;
	}
	video_st->time_base.num = 1;
	video_st->time_base.den = 25;
	video_st->codec = pCodecCtx;

	//Open the output URL; must be done before avformat_write_header() when muxing
	if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0){
		printf("Failed to open output URL!\n");
		return -1;
	}

	//Show some Information
	av_dump_format(ofmt_ctx, 0, out_path, 1);

	//Write File Header
	avformat_write_header(ofmt_ctx,NULL);

	//prepare before decode and encode
	dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));
	//enc_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));
	//camera data is usually RGB; convert it to YUV420P
	img_convert_ctx = sws_getContext(ifmt_ctx->streams[videoindex]->codec->width, ifmt_ctx->streams[videoindex]->codec->height, 
		ifmt_ctx->streams[videoindex]->codec->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	pFrameYUV = av_frame_alloc();
	uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	
	printf("\n --------call started----------\n\n");
	printf("Press enter to stop...");
	hThread = CreateThread(
		NULL,                   // default security attributes
		0,                      // use default stack size  
		MyThreadFunction,       // thread function name
		NULL,          // argument to thread function 
		0,                      // use default creation flags 
		NULL);   // returns the thread identifier 
	
	//start decode and encode
	int64_t start_time=av_gettime();
	while (av_read_frame(ifmt_ctx, dec_pkt) >= 0){	
		if (exit_thread)
			break;
		av_log(NULL, AV_LOG_DEBUG, "Going to reencode the frame\n");
		pframe = av_frame_alloc();
		if (!pframe) {
			ret = AVERROR(ENOMEM);
			return -1;
		}
		//av_packet_rescale_ts(dec_pkt, ifmt_ctx->streams[dec_pkt->stream_index]->time_base,
		//	ifmt_ctx->streams[dec_pkt->stream_index]->codec->time_base);
		ret = avcodec_decode_video2(ifmt_ctx->streams[dec_pkt->stream_index]->codec, pframe,
			&dec_got_frame, dec_pkt);
		if (ret < 0) {
			av_frame_free(&pframe);
			av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
			break;
		}
		if (dec_got_frame){
			sws_scale(img_convert_ctx, (const uint8_t* const*)pframe->data, pframe->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);	

			enc_pkt.data = NULL;
			enc_pkt.size = 0;
			av_init_packet(&enc_pkt);
			ret = avcodec_encode_video2(pCodecCtx, &enc_pkt, pFrameYUV, &enc_got_frame);
			av_frame_free(&pframe);
			if (enc_got_frame == 1){
				//printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, enc_pkt.size);
				framecnt++;	
				enc_pkt.stream_index = video_st->index;

				//Write PTS
				AVRational time_base = ofmt_ctx->streams[videoindex]->time_base;//{ 1, 1000 };
				AVRational r_framerate1 = ifmt_ctx->streams[videoindex]->r_frame_rate;// { 50, 2 };
				AVRational time_base_q = { 1, AV_TIME_BASE };
				//Duration between 2 frames (us)
				int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));	//duration between two frames in AV_TIME_BASE units
				//Parameters
				//enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
				enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
				enc_pkt.dts = enc_pkt.pts;
				enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
				enc_pkt.pos = -1;
				
				//Delay
				int64_t pts_time = av_rescale_q(enc_pkt.dts, time_base, time_base_q);
				int64_t now_time = av_gettime() - start_time;
				if (pts_time > now_time)
					av_usleep(pts_time - now_time);

				ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
				av_free_packet(&enc_pkt);
			}
		}
		else {
			av_frame_free(&pframe);
		}
		av_free_packet(dec_pkt);
	}
	//Flush Encoder
	ret = flush_encoder(ifmt_ctx,ofmt_ctx,0,framecnt);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		return -1;
	}

	//Write file trailer
	av_write_trailer(ofmt_ctx);

	//Clean
	if (video_st)
		avcodec_close(video_st->codec);
	av_free(out_buffer);
	avio_close(ofmt_ctx->pb);
	avformat_close_input(&ifmt_ctx); //input contexts must be closed, not just freed
	avformat_free_context(ofmt_ctx);
	CloseHandle(hThread);
	return 0;
}
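
The main() above calls a few helpers that are not part of this excerpt: show_dshow_device(), MyThreadFunction() with its exit_thread flag, and flush_encoder(). A minimal flush_encoder() sketch, with the prototype inferred from the call site and the timestamp handling simplified, might look like this:

//Hypothetical sketch of flush_encoder(); prototype inferred from the call
//site above, timestamp handling omitted for brevity.
int flush_encoder(AVFormatContext *ifmt_ctx, AVFormatContext *ofmt_ctx,
                  unsigned int stream_index, int framecnt)
{
	int ret, got_frame;
	AVPacket enc_pkt;
	AVCodecContext *enc_ctx = ofmt_ctx->streams[stream_index]->codec;

	if (!(enc_ctx->codec->capabilities & CODEC_CAP_DELAY))
		return 0;
	while (1) {
		enc_pkt.data = NULL;
		enc_pkt.size = 0;
		av_init_packet(&enc_pkt);
		//Passing NULL as the frame drains the encoder's delayed frames
		ret = avcodec_encode_video2(enc_ctx, &enc_pkt, NULL, &got_frame);
		if (ret < 0)
			break;
		if (!got_frame)
			return 0;
		//A complete version would set pts/dts/duration as in the main loop
		enc_pkt.stream_index = stream_index;
		ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
		if (ret < 0)
			break;
	}
	return ret;
}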
int ff_qsv_decode(AVCodecContext *avctx, QSVContext *q,
                  AVFrame *frame, int *got_frame,
                  AVPacket *avpkt)
{
    QSVFrame *out_frame;
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint sync;
    mfxBitstream bs = { { { 0 } } };
    int ret;
    int n_out_frames;
    int buffered = 0;

    if (!q->engine_ready) {
        ret = ff_qsv_decode_init(avctx, q, avpkt);
        if (ret)
            return ret;
    }

    if (avpkt->size ) {
        if (av_fifo_size(q->input_fifo)) {
            /* we have got rest of previous packet into buffer */
            if (av_fifo_space(q->input_fifo) < avpkt->size) {
                ret = av_fifo_grow(q->input_fifo, avpkt->size);
                if (ret < 0)
                    return ret;
            }
            av_fifo_generic_write(q->input_fifo, avpkt->data, avpkt->size, NULL);
            bs.Data       = q->input_fifo->rptr;
            bs.DataLength = av_fifo_size(q->input_fifo);
            buffered = 1;
        } else {
            bs.Data       = avpkt->data;
            bs.DataLength = avpkt->size;
        }
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }

    while (1) {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0)
            return ret;
        do {
            ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                                  insurf, &outsurf, &sync);
            if (ret != MFX_WRN_DEVICE_BUSY)
                break;
            av_usleep(500);
        } while (1);

        if (MFX_WRN_VIDEO_PARAM_CHANGED==ret) {
            /* TODO: handle here sequence header changing */
        }

        if (sync) {
            QSVFrame *out_frame = find_frame(q, outsurf);

            if (!out_frame) {
                av_log(avctx, AV_LOG_ERROR,
                       "The returned surface does not correspond to any frame\n");
                return AVERROR_BUG;
            }

            out_frame->queued = 1;
            av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
            av_fifo_generic_write(q->async_fifo, &sync,      sizeof(sync),      NULL);

            continue;
        }
        if (MFX_ERR_MORE_SURFACE != ret && ret < 0)
            break;
    }

    /* make sure we do not enter an infinite loop if the SDK
     * did not consume any data and did not return anything */
    if (!sync && !bs.DataOffset) {
        av_log(avctx, AV_LOG_WARNING, "A decode call did not consume any data\n");
        bs.DataOffset = avpkt->size;
    }

    if (buffered) {
        qsv_fifo_relocate(q->input_fifo, bs.DataOffset);
    } else if (bs.DataOffset!=avpkt->size) {
        /* some data of packet was not consumed. store it to local buffer */
        av_fifo_generic_write(q->input_fifo, avpkt->data+bs.DataOffset,
                              avpkt->size - bs.DataOffset, NULL);
    }

    if (MFX_ERR_MORE_DATA!=ret && ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error %d during QSV decoding.\n", ret);
        return ff_qsv_error(ret);
    }
    n_out_frames = av_fifo_size(q->async_fifo) / (sizeof(out_frame)+sizeof(sync));

    if (n_out_frames > q->async_depth || (!avpkt->size && n_out_frames) ) {
        AVFrame *src_frame;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);
        out_frame->queued = 0;

        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        src_frame = out_frame->frame;

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        outsurf = out_frame->surface;

        frame->pkt_pts = frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);

        *got_frame = 1;
    }

    return avpkt->size;
}
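
ff_qsv_decode() relies on find_frame() to map the surface returned by the SDK back to its QSVFrame. A minimal sketch, assuming the QSVFrame nodes form a singly linked list (q->work_frames) and store their surface pointer, could be:

//Sketch of find_frame(), assuming a linked list of QSVFrame nodes
static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (surf == cur->surface)   //match the surface handed out earlier
            return cur;
        cur = cur->next;
    }
    return NULL;                    //caller treats this as AVERROR_BUG
}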
Example #3
int ff_dxva2_common_end_frame(AVCodecContext *avctx, AVFrame *frame,
                              const void *pp, unsigned pp_size,
                              const void *qm, unsigned qm_size,
                              int (*commit_bs_si)(AVCodecContext *,
                                                  DXVA2_DecodeBufferDesc *bs,
                                                  DXVA2_DecodeBufferDesc *slice))
{
    struct dxva_context *ctx = avctx->hwaccel_context;
    unsigned               buffer_count = 0;
    DXVA2_DecodeBufferDesc buffer[4];
    DXVA2_DecodeExecuteParams exec = { 0 };
    int result, runs = 0;
    HRESULT hr;

    do {
        hr = IDirectXVideoDecoder_BeginFrame(ctx->decoder,
                                             ff_dxva2_get_surface(frame),
                                             NULL);
        if (hr == E_PENDING)
            av_usleep(2000);
    } while (hr == E_PENDING && ++runs < 50);

    if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "Failed to begin frame: 0x%lx\n", hr);
        return -1;
    }

    result = ff_dxva2_commit_buffer(avctx, ctx, &buffer[buffer_count],
                                    DXVA2_PictureParametersBufferType,
                                    pp, pp_size, 0);
    if (result) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to add picture parameter buffer\n");
        goto end;
    }
    buffer_count++;

    if (qm_size > 0) {
        result = ff_dxva2_commit_buffer(avctx, ctx, &buffer[buffer_count],
                                        DXVA2_InverseQuantizationMatrixBufferType,
                                        qm, qm_size, 0);
        if (result) {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to add inverse quantization matrix buffer\n");
            goto end;
        }
        buffer_count++;
    }

    result = commit_bs_si(avctx,
                          &buffer[buffer_count + 0],
                          &buffer[buffer_count + 1]);
    if (result) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to add bitstream or slice control buffer\n");
        goto end;
    }
    buffer_count += 2;

    /* TODO Film Grain when possible */

    assert(buffer_count == 1 + (qm_size > 0) + 2);

    exec.NumCompBuffers      = buffer_count;
    exec.pCompressedBuffers  = buffer;
    exec.pExtensionData      = NULL;
    hr = IDirectXVideoDecoder_Execute(ctx->decoder, &exec);
    if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "Failed to execute: 0x%lx\n", hr);
        result = -1;
    }

end:
    hr = IDirectXVideoDecoder_EndFrame(ctx->decoder, NULL);
    if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "Failed to end frame: 0x%lx\n", hr);
        result = -1;
    }

    return result;
}
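
ff_dxva2_common_end_frame() delegates the bitstream and slice control buffers to a per-codec callback. A hypothetical skeleton of such a callback (the slice data copy via IDirectXVideoDecoder_GetBuffer()/ReleaseBuffer() is codec-specific and omitted here) might be:

//Hypothetical commit_bs_si callback skeleton; buffer filling is codec-specific
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DXVA2_DecodeBufferDesc *bs,
                                             DXVA2_DecodeBufferDesc *sc)
{
    //...lock the bitstream buffer, copy slice data, build slice controls...
    bs->CompressedBufferType = DXVA2_BitStreamDateBufferType; //sic: typo in the real API
    sc->CompressedBufferType = DXVA2_SliceControlBufferType;
    return 0;
}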
Example #4
File: pktdumper.c  Project: AVbin/libav
int main(int argc, char **argv)
{
    char fntemplate[FILENAME_BUF_SIZE];
    char pktfilename[FILENAME_BUF_SIZE];
    AVFormatContext *fctx = NULL;
    AVPacket pkt;
    int64_t pktnum  = 0;
    int64_t maxpkts = 0;
    int donotquit   = 0;
    int nowrite     = 0;
    int err;

    if ((argc > 1) && !strncmp(argv[1], "-", 1)) {
        if (strchr(argv[1], 'w'))
            donotquit = 1;
        if (strchr(argv[1], 'n'))
            nowrite = 1;
        argv++;
        argc--;
    }
    if (argc < 2)
        return usage(1);
    if (argc > 2)
        maxpkts = atoi(argv[2]);
    av_strlcpy(fntemplate, argv[1], sizeof(fntemplate));
    if (strrchr(argv[1], '/'))
        av_strlcpy(fntemplate, strrchr(argv[1], '/') + 1, sizeof(fntemplate));
    if (strrchr(fntemplate, '.'))
        *strrchr(fntemplate, '.') = '\0';
    if (strchr(fntemplate, '%')) {
        fprintf(stderr, "can't use filenames containing '%%'\n");
        return usage(1);
    }
    if (strlen(fntemplate) + sizeof(PKTFILESUFF) >= sizeof(fntemplate) - 1) {
        fprintf(stderr, "filename too long\n");
        return usage(1);
    }
    strcat(fntemplate, PKTFILESUFF);
    printf("FNTEMPLATE: '%s'\n", fntemplate);

    // register all file formats
    av_register_all();

    err = avformat_open_input(&fctx, argv[1], NULL, NULL);
    if (err < 0) {
        fprintf(stderr, "cannot open input: error %d\n", err);
        return 1;
    }

    err = avformat_find_stream_info(fctx, NULL);
    if (err < 0) {
        fprintf(stderr, "avformat_find_stream_info: error %d\n", err);
        return 1;
    }

    av_init_packet(&pkt);

    while ((err = av_read_frame(fctx, &pkt)) >= 0) {
        int fd;
        snprintf(pktfilename, sizeof(pktfilename), fntemplate, pktnum,
                 pkt.stream_index, pkt.pts, pkt.size,
                 (pkt.flags & AV_PKT_FLAG_KEY) ? 'K' : '_');
        printf(PKTFILESUFF "\n", pktnum, pkt.stream_index, pkt.pts, pkt.size,
               (pkt.flags & AV_PKT_FLAG_KEY) ? 'K' : '_');
        if (!nowrite) {
            fd  = open(pktfilename, O_WRONLY | O_CREAT, 0644);
            if (fd < 0) {
                fprintf(stderr, "open: failed to create %s\n", pktfilename);
                return 1;
            }
            err = write(fd, pkt.data, pkt.size);
            if (err < 0) {
                fprintf(stderr, "write: error %d\n", err);
                return 1;
            }
            close(fd);
        }
        av_free_packet(&pkt);
        pktnum++;
        if (maxpkts && (pktnum >= maxpkts))
            break;
    }

    avformat_close_input(&fctx);

    while (donotquit)
        av_usleep(60 * 1000000);

    return 0;
}
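
pktdumper.c uses FILENAME_BUF_SIZE, PKTFILESUFF and usage() without showing them. In upstream libav they are defined roughly as follows (reproduced from memory, so treat as approximate):

#define FILENAME_BUF_SIZE 4096
//One dump file per packet: packet number, stream index, pts, size, key flag
#define PKTFILESUFF "_%08" PRId64 "_%02d_%010" PRId64 "_%06d_%c.bin"

static int usage(int ret)
{
    fprintf(stderr, "Dump (up to maxpkts) AVPackets as they are demuxed by libavformat.\n");
    fprintf(stderr, "usage: pktdumper [-nw] filename [maxpkts]\n");
    return ret;
}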
int main(int argc, char* argv[])
{
    AVFormatContext *ifmt_ctx = NULL;
    AVFormatContext *ifmt_ctx_a = NULL;
    AVFormatContext *ofmt_ctx;
    AVInputFormat* ifmt;
    AVStream* video_st;
    AVStream* audio_st;
    AVCodecContext* pCodecCtx;
    AVCodecContext* pCodecCtx_a;
    AVCodec* pCodec;
    AVCodec* pCodec_a;
    AVPacket *dec_pkt, enc_pkt;
    AVPacket *dec_pkt_a, enc_pkt_a;
    AVFrame *pframe, *pFrameYUV;
    struct SwsContext *img_convert_ctx;
    struct SwrContext *aud_convert_ctx;

    char capture_name[80] = { 0 };
	char device_name[80] = { 0 };
	char device_name_a[80] = { 0 };
    int framecnt = 0;
	int nb_samples = 0;
    int videoindex;
    int audioindex;
    int i;
    int ret;
    HANDLE  hThread;

	const char* out_path = "rtmp://localhost/live/livestream";
    int dec_got_frame, enc_got_frame;
	int dec_got_frame_a, enc_got_frame_a;

	int aud_next_pts = 0;
	int vid_next_pts = 0;
	int encode_video = 1, encode_audio = 1;

	AVRational time_base_q = { 1, AV_TIME_BASE };

    av_register_all();
    //Register Device
    avdevice_register_all();
    avformat_network_init();
#if USEFILTER
    //Register Filter
    avfilter_register_all();
    buffersrc = avfilter_get_by_name("buffer");
    buffersink = avfilter_get_by_name("buffersink");
#endif

    //Show Dshow Device  
    show_dshow_device();

    printf("\nChoose video capture device: ");
    if (fgets(capture_name, sizeof(capture_name), stdin) == NULL)
    {
		printf("Error reading device name\n");
		return -1;
    }
	capture_name[strcspn(capture_name, "\r\n")] = '\0'; //strip the trailing newline
    sprintf(device_name, "video=%s", capture_name);

	printf("\nChoose audio capture device: ");
	if (fgets(capture_name, sizeof(capture_name), stdin) == NULL)
	{
		printf("Error reading device name\n");
		return -1;
	}
	capture_name[strcspn(capture_name, "\r\n")] = '\0'; //strip the trailing newline
	sprintf(device_name_a, "audio=%s", capture_name);

    //wchar_t *cam = L"video=Integrated Camera";
	//wchar_t *cam = L"video=YY伴侣";
	//char *device_name_utf8 = dup_wchar_to_utf8(cam);
    //wchar_t *cam_a = L"audio=麦克风阵列 (Realtek High Definition Audio)";
	//char *device_name_utf8_a = dup_wchar_to_utf8(cam_a);

	ifmt = av_find_input_format("dshow");
    // Set device params
    AVDictionary *device_param = 0;
	//If rtbufsize is not set, real-time buffer warnings appear on the console, but the stream can usually still be watched or recorded correctly.
	//Setting rtbufsize suppresses those warnings, but a larger rtbufsize adds latency.
    //av_dict_set(&device_param, "rtbufsize", "10M", 0);

    //Set own video device's name
	if (avformat_open_input(&ifmt_ctx, device_name, ifmt, &device_param) != 0){

        printf("Couldn't open input video stream.(无法打开输入流)\n");
        return -1;
    }
	//Set own audio device's name
	if (avformat_open_input(&ifmt_ctx_a, device_name_a, ifmt, &device_param) != 0){

        printf("Couldn't open input audio stream.(无法打开输入流)\n");
        return -1;
    }
    //input video initialize
    if (avformat_find_stream_info(ifmt_ctx, NULL) < 0)
    {
        printf("Couldn't find video stream information.(无法获取流信息)\n");
        return -1;
    }
    videoindex = -1;
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    {
        videoindex = i;
        break;
    }
    if (videoindex == -1)
    {
        printf("Couldn't find a video stream.(没有找到视频流)\n");
        return -1;
    }
    if (avcodec_open2(ifmt_ctx->streams[videoindex]->codec, avcodec_find_decoder(ifmt_ctx->streams[videoindex]->codec->codec_id), NULL) < 0)
    {
        printf("Could not open video codec.(无法打开解码器)\n");
        return -1;
    }
    //input audio initialize
    if (avformat_find_stream_info(ifmt_ctx_a, NULL) < 0)
    {
        printf("Couldn't find audio stream information.(无法获取流信息)\n");
        return -1;
    }
    audioindex = -1;
    for (i = 0; i < ifmt_ctx_a->nb_streams; i++)
    if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
    {
        audioindex = i;
        break;
    }
    if (audioindex == -1)
    {
        printf("Couldn't find a audio stream.(没有找到视频流)\n");
        return -1;
	}
    if (avcodec_open2(ifmt_ctx_a->streams[audioindex]->codec, avcodec_find_decoder(ifmt_ctx_a->streams[audioindex]->codec->codec_id), NULL) < 0)
    {
        printf("Could not open audio codec.(无法打开解码器)\n");
        return -1;
    }

    //output initialize
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path);
    //output video encoder initialize
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pCodec){
        printf("Can not find output video encoder! (没有找到合适的编码器!)\n");
        return -1;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
    pCodecCtx->width = ifmt_ctx->streams[videoindex]->codec->width;
    pCodecCtx->height = ifmt_ctx->streams[videoindex]->codec->height;
    pCodecCtx->time_base.num = 1;
    pCodecCtx->time_base.den = 25;
    pCodecCtx->bit_rate = 300000;
    pCodecCtx->gop_size = 250;
    /* Some formats want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    //H264 codec param
    //pCodecCtx->me_range = 16;
    //pCodecCtx->max_qdiff = 4;
    //pCodecCtx->qcompress = 0.6;
    pCodecCtx->qmin = 10;
    pCodecCtx->qmax = 51;
    //Optional Param
    pCodecCtx->max_b_frames = 0;
    // Set H264 preset and tune
    AVDictionary *param = 0;
    av_dict_set(&param, "preset", "fast", 0);
    av_dict_set(&param, "tune", "zerolatency", 0);

    if (avcodec_open2(pCodecCtx, pCodec, &param) < 0){
        printf("Failed to open output video encoder! (编码器打开失败!)\n");
        return -1;
    }

    //Add a new stream to the output; must be called before avformat_write_header() when muxing
    video_st = avformat_new_stream(ofmt_ctx, pCodec);
    if (video_st == NULL){
        return -1;
    }
    video_st->time_base.num = 1;
    video_st->time_base.den = 25;
    video_st->codec = pCodecCtx;


    //output audio encoder initialize
    pCodec_a = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!pCodec_a){
        printf("Can not find output audio encoder! (没有找到合适的编码器!)\n");
        return -1;
    }
    pCodecCtx_a = avcodec_alloc_context3(pCodec_a);
    pCodecCtx_a->channels = 2;
    pCodecCtx_a->channel_layout = av_get_default_channel_layout(2);
	pCodecCtx_a->sample_rate = ifmt_ctx_a->streams[audioindex]->codec->sample_rate;
    pCodecCtx_a->sample_fmt = pCodec_a->sample_fmts[0];
    pCodecCtx_a->bit_rate = 32000;
    pCodecCtx_a->time_base.num = 1;
	pCodecCtx_a->time_base.den = pCodecCtx_a->sample_rate;
    /** Allow the use of the experimental AAC encoder */
    pCodecCtx_a->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    /* Some formats want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        pCodecCtx_a->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if (avcodec_open2(pCodecCtx_a, pCodec_a, NULL) < 0){
        printf("Failed to open ouput audio encoder! (编码器打开失败!)\n");
        return -1;
    }

    //Add a new stream to the output; must be called before avformat_write_header() when muxing
    audio_st = avformat_new_stream(ofmt_ctx, pCodec_a);
    if (audio_st == NULL){
        return -1;
    }
    audio_st->time_base.num = 1;
	audio_st->time_base.den = pCodecCtx_a->sample_rate;
    audio_st->codec = pCodecCtx_a;

    //Open the output URL; must be done before avformat_write_header() when muxing
    if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output URL!\n");
        return -1;
    }

    //Show some Information
    av_dump_format(ofmt_ctx, 0, out_path, 1);

    //Write File Header
    avformat_write_header(ofmt_ctx, NULL);

    //prepare before decode and encode
    dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));

#if USEFILTER
#else
	//camera data may be RGB or some other pixel format; convert it to YUV420P
    img_convert_ctx = sws_getContext(ifmt_ctx->streams[videoindex]->codec->width, ifmt_ctx->streams[videoindex]->codec->height,
        ifmt_ctx->streams[videoindex]->codec->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    
	// Initialize the resampler to be able to convert audio sample formats
	aud_convert_ctx = swr_alloc_set_opts(NULL,
		av_get_default_channel_layout(pCodecCtx_a->channels),
		pCodecCtx_a->sample_fmt,
		pCodecCtx_a->sample_rate,
		av_get_default_channel_layout(ifmt_ctx_a->streams[audioindex]->codec->channels),
		ifmt_ctx_a->streams[audioindex]->codec->sample_fmt,
		ifmt_ctx_a->streams[audioindex]->codec->sample_rate,
		0, NULL);
	
	/**
	* Perform a sanity check so that the number of converted samples is
	* not greater than the number of samples to be converted.
	* If the sample rates differ, this case has to be handled differently
	*/
	//av_assert0(pCodecCtx_a->sample_rate == ifmt_ctx_a->streams[audioindex]->codec->sample_rate);

	swr_init(aud_convert_ctx);

    
#endif
    //Initialize the buffer to store YUV frames to be encoded.
	pFrameYUV = av_frame_alloc();
    uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

	//Initialize the FIFO buffer to store audio samples to be encoded. 
    AVAudioFifo *fifo = NULL;
	fifo = av_audio_fifo_alloc(pCodecCtx_a->sample_fmt, pCodecCtx_a->channels, 1);

	//Initialize the buffer to store converted samples to be encoded.
	uint8_t **converted_input_samples = NULL;
	/**
	* Allocate as many pointers as there are audio channels.
	* Each pointer will later point to the audio samples of the corresponding
	* channels (although it may be NULL for interleaved formats).
	*/
	if (!(converted_input_samples = (uint8_t**)calloc(pCodecCtx_a->channels,
		sizeof(*converted_input_samples)))) { //one pointer per channel
		printf("Could not allocate converted input sample pointers\n");
		return AVERROR(ENOMEM);
	}


    printf("\n --------call started----------\n");
#if USEFILTER
    printf("\n Press differnet number for different filters:");
    printf("\n 1->Mirror");
    printf("\n 2->Add Watermark");
    printf("\n 3->Negate");
    printf("\n 4->Draw Edge");
    printf("\n 5->Split Into 4");
    printf("\n 6->Vintage");
    printf("\n Press 0 to remove filter\n");
#endif
    printf("\nPress enter to stop...\n");
    hThread = CreateThread(
        NULL,                   // default security attributes
        0,                      // use default stack size  
        MyThreadFunction,       // thread function name
        NULL,          // argument to thread function 
        0,                      // use default creation flags 
        NULL);   // returns the thread identifier 

    //start decode and encode
    int64_t start_time = av_gettime();
    while (encode_video || encode_audio)
    {
        if (encode_video &&
			(!encode_audio || av_compare_ts(vid_next_pts, time_base_q,
			aud_next_pts, time_base_q) <= 0))
        {
            if ((ret=av_read_frame(ifmt_ctx, dec_pkt)) >= 0){

                if (exit_thread)
                    break;

                av_log(NULL, AV_LOG_DEBUG, "Going to reencode the frame\n");
                pframe = av_frame_alloc();
                if (!pframe) {
                    ret = AVERROR(ENOMEM);
                    return ret;
                }
                ret = avcodec_decode_video2(ifmt_ctx->streams[dec_pkt->stream_index]->codec, pframe,
                    &dec_got_frame, dec_pkt);
                if (ret < 0) {
                    av_frame_free(&pframe);
                    av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                    break;
                }
                if (dec_got_frame){
#if USEFILTER
                    pframe->pts = av_frame_get_best_effort_timestamp(pframe);

                    if (filter_change)
                        apply_filters(ifmt_ctx);
                    filter_change = 0;
                    /* push the decoded frame into the filtergraph */
                    if (av_buffersrc_add_frame(buffersrc_ctx, pframe) < 0) {
                        printf("Error while feeding the filtergraph\n");
                        break;
                    }
                    picref = av_frame_alloc();

                    /* pull filtered pictures from the filtergraph */
                    while (1) {
                        ret = av_buffersink_get_frame_flags(buffersink_ctx, picref, 0);
                        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                            break;
                        if (ret < 0)
                            return ret;

                        if (picref) {
                            img_convert_ctx = sws_getContext(picref->width, picref->height, (AVPixelFormat)picref->format, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
                            sws_scale(img_convert_ctx, (const uint8_t* const*)picref->data, picref->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                            sws_freeContext(img_convert_ctx);
                            pFrameYUV->width = picref->width;
                            pFrameYUV->height = picref->height;
                            pFrameYUV->format = PIX_FMT_YUV420P;
#else
                    sws_scale(img_convert_ctx, (const uint8_t* const*)pframe->data, pframe->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                    pFrameYUV->width = pframe->width;
                    pFrameYUV->height = pframe->height;
                    pFrameYUV->format = PIX_FMT_YUV420P;
#endif					
                    enc_pkt.data = NULL;
                    enc_pkt.size = 0;
                    av_init_packet(&enc_pkt);
                    ret = avcodec_encode_video2(pCodecCtx, &enc_pkt, pFrameYUV, &enc_got_frame);
                    av_frame_free(&pframe);
                    if (enc_got_frame == 1){
                        //printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, enc_pkt.size);
                        framecnt++;
                        enc_pkt.stream_index = video_st->index;						

                        //Write PTS
						AVRational time_base = ofmt_ctx->streams[0]->time_base;//{ 1, 1000 };
                        AVRational r_framerate1 = ifmt_ctx->streams[videoindex]->r_frame_rate;//{ 50, 2 }; 
                        //Duration between 2 frames (us)
                        int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));	//duration between two frames in AV_TIME_BASE units
                        //Parameters
                        //enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                        enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
                        enc_pkt.dts = enc_pkt.pts;
                        enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                        enc_pkt.pos = -1;
                        //printf("video pts : %d\n", enc_pkt.pts);

						vid_next_pts=framecnt*calc_duration; //general timebase

                        //Delay
						int64_t pts_time = av_rescale_q(enc_pkt.pts, time_base, time_base_q);
						int64_t now_time = av_gettime() - start_time;						
						if ((pts_time > now_time) && ((vid_next_pts + pts_time - now_time)<aud_next_pts))
							av_usleep(pts_time - now_time);
						
                        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
                        av_free_packet(&enc_pkt);
                    }
#if USEFILTER
                    av_frame_unref(picref);
                }
            }
#endif
        }
        else {
            av_frame_free(&pframe);
        }
        av_free_packet(dec_pkt);
    }
            else if (ret == AVERROR_EOF)
                encode_video = 0;
            else
            {
                printf("Could not read video frame\n");
                return ret;
            }
    }
    else
    {
        //audio transcoding here
        const int output_frame_size = pCodecCtx_a->frame_size;

		if (exit_thread)
			break;

        /**
        * Make sure that there is one frame worth of samples in the FIFO
        * buffer so that the encoder can do its work.
        * Since the decoder's and the encoder's frame size may differ, we
        * need a FIFO buffer to store enough input samples to make up at
        * least one frame worth of output samples.
        */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /**
            * Decode one frame worth of audio samples, convert it to the
            * output sample format and put it into the FIFO buffer.
            */
			AVFrame *input_frame = av_frame_alloc();
			if (!input_frame)
			{
				ret = AVERROR(ENOMEM);
				return ret;
			}			
			
			/** Decode one frame worth of audio samples. */
			/** Packet used for temporary storage. */
			AVPacket input_packet;
			av_init_packet(&input_packet);
			input_packet.data = NULL;
			input_packet.size = 0;
			
			/** Read one audio frame from the input file into a temporary packet. */
			if ((ret = av_read_frame(ifmt_ctx_a, &input_packet)) < 0) {
				/** If we are at the end of the file, flush the decoder below. */
				if (ret == AVERROR_EOF)
				{
					encode_audio = 0;
				}
				else
				{
					printf("Could not read audio frame\n");
					return ret;
				}					
			}

			/**
			* Decode the audio frame stored in the temporary packet.
			* The input audio stream decoder is used to do this.
			* If we are at the end of the file, pass an empty packet to the decoder
			* to flush it.
			*/
			if ((ret = avcodec_decode_audio4(ifmt_ctx_a->streams[audioindex]->codec, input_frame,
				&dec_got_frame_a, &input_packet)) < 0) {
				printf("Could not decode audio frame\n");
				return ret;
			}
			av_packet_unref(&input_packet);
			/** If there is decoded data, convert and store it */
			if (dec_got_frame_a) {
				/**
				* Allocate memory for the samples of all channels in one consecutive
				* block for convenience.
				*/
				if ((ret = av_samples_alloc(converted_input_samples, NULL,
					pCodecCtx_a->channels,
					input_frame->nb_samples,
					pCodecCtx_a->sample_fmt, 0)) < 0) {
					printf("Could not allocate converted input samples\n");
					av_freep(&(*converted_input_samples)[0]);
					free(*converted_input_samples);
					return ret;
				}

				/**
				* Convert the input samples to the desired output sample format.
				* This requires a temporary storage provided by converted_input_samples.
				*/
				/** Convert the samples using the resampler. */
				if ((ret = swr_convert(aud_convert_ctx,
					converted_input_samples, input_frame->nb_samples,
					(const uint8_t**)input_frame->extended_data, input_frame->nb_samples)) < 0) {
					printf("Could not convert input samples\n");
					return ret;
				}

				/** Add the converted input samples to the FIFO buffer for later processing. */
				/**
				* Make the FIFO as large as it needs to be to hold both,
				* the old and the new samples.
				*/
				if ((ret = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + input_frame->nb_samples)) < 0) {
					printf("Could not reallocate FIFO\n");
					return ret;
				}

				/** Store the new samples in the FIFO buffer. */
				if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
					input_frame->nb_samples) < input_frame->nb_samples) {
					printf("Could not write data to FIFO\n");
					return AVERROR_EXIT;
				}				
			}
        }

        /**
        * If we have enough samples for the encoder, we encode them.
        * At the end of the file, we pass the remaining samples to
        * the encoder.
        */
        if (av_audio_fifo_size(fifo) >= output_frame_size)
            /**
            * Take one frame worth of audio samples from the FIFO buffer,
            * encode it and write it to the output file.
            */
        {
            /** Temporary storage of the output samples of the frame written to the file. */
			AVFrame *output_frame=av_frame_alloc();
			if (!output_frame)
			{
				ret = AVERROR(ENOMEM);
				return ret;
			}
			/**
			* Use the maximum number of possible samples per frame.
			* If there is less than the maximum possible frame size in the FIFO
			* buffer use this number. Otherwise, use the maximum possible frame size
			*/
			const int frame_size = FFMIN(av_audio_fifo_size(fifo),
				pCodecCtx_a->frame_size);
			
			/** Initialize temporary storage for one output frame. */
			/**
			* Set the frame's parameters, especially its size and format.
			* av_frame_get_buffer needs this to allocate memory for the
			* audio samples of the frame.
			* Default channel layouts based on the number of channels
			* are assumed for simplicity.
			*/
			output_frame->nb_samples = frame_size;
			output_frame->channel_layout = pCodecCtx_a->channel_layout;
			output_frame->format = pCodecCtx_a->sample_fmt;
			output_frame->sample_rate = pCodecCtx_a->sample_rate;

			/**
			* Allocate the samples of the created frame. This call will make
			* sure that the audio frame can hold as many samples as specified.
			*/
			if ((ret = av_frame_get_buffer(output_frame, 0)) < 0) {
				printf("Could not allocate output frame samples\n");
				av_frame_free(&output_frame);
				return ret;
			}
			
			/**
			* Read as many samples from the FIFO buffer as required to fill the frame.
			* The samples are stored in the frame temporarily.
			*/
			if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
				printf("Could not read data from FIFO\n");
				return AVERROR_EXIT;
			}

			/** Encode one frame worth of audio samples. */
			/** Packet used for temporary storage. */
			AVPacket output_packet;
			av_init_packet(&output_packet);
			output_packet.data = NULL;
			output_packet.size = 0;
			
			/** Set a timestamp based on the sample rate for the container. */
			if (output_frame) {
				nb_samples += output_frame->nb_samples;
			}

			/**
			* Encode the audio frame and store it in the temporary packet.
			* The output audio stream encoder is used to do this.
			*/
			if ((ret = avcodec_encode_audio2(pCodecCtx_a, &output_packet,
				output_frame, &enc_got_frame_a)) < 0) {
				printf("Could not encode frame\n");
				av_packet_unref(&output_packet);
				return ret;
			}

			/** Write one audio frame from the temporary packet to the output file. */
			if (enc_got_frame_a) {

				output_packet.stream_index = 1;

				AVRational time_base = ofmt_ctx->streams[1]->time_base;
				AVRational r_framerate1 = { ifmt_ctx_a->streams[audioindex]->codec->sample_rate, 1 };// { 44100, 1};  
				int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));  //duration of one audio sample in AV_TIME_BASE units

				output_packet.pts = av_rescale_q(nb_samples*calc_duration, time_base_q, time_base);
				output_packet.dts = output_packet.pts;
				output_packet.duration = output_frame->nb_samples;

				//printf("audio pts : %d\n", output_packet.pts);
				aud_next_pts = nb_samples*calc_duration;

				int64_t pts_time = av_rescale_q(output_packet.pts, time_base, time_base_q);
				int64_t now_time = av_gettime() - start_time;
				if ((pts_time > now_time) && ((aud_next_pts + pts_time - now_time)<vid_next_pts))
					av_usleep(pts_time - now_time);

				if ((ret = av_interleaved_write_frame(ofmt_ctx, &output_packet)) < 0) {
					printf("Could not write frame\n");
					av_packet_unref(&output_packet);
					return ret;
				}

				av_packet_unref(&output_packet);
			}			
			av_frame_free(&output_frame);		
        }      
	}
  }


    //Flush Encoder
    ret = flush_encoder(ifmt_ctx, ofmt_ctx, 0, framecnt);
    if (ret < 0) {
        printf("Flushing encoder failed\n");
        return -1;
    }
	ret = flush_encoder_a(ifmt_ctx_a, ofmt_ctx, 1, nb_samples);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		return -1;
	}



    //Write file trailer
    av_write_trailer(ofmt_ctx);

cleanup:
    //Clean
#if USEFILTER
    if (filter_graph)
        avfilter_graph_free(&filter_graph);
#endif
    if (video_st)
        avcodec_close(video_st->codec);
    if (audio_st)
        avcodec_close(audio_st->codec);
    av_free(out_buffer);
	if (converted_input_samples) {
		av_freep(&converted_input_samples[0]);
		//free(converted_input_samples);
	}
	if (fifo)
		av_audio_fifo_free(fifo);
    avio_close(ofmt_ctx->pb);
    avformat_close_input(&ifmt_ctx); //input contexts must be closed, not just freed
	avformat_close_input(&ifmt_ctx_a);
    avformat_free_context(ofmt_ctx);
    CloseHandle(hThread);
    return 0;
}
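
As with the video-only version, flush_encoder() and flush_encoder_a() are not included in this excerpt. An audio flush sketch built on avcodec_encode_audio2(), with the prototype inferred from the call site and the timestamp math left out, might look like:

//Hypothetical flush_encoder_a() sketch; prototype inferred from the call site
int flush_encoder_a(AVFormatContext *ifmt_ctx_a, AVFormatContext *ofmt_ctx,
                    unsigned int stream_index, int nb_samples)
{
    int ret, got_frame;
    AVPacket enc_pkt;
    AVCodecContext *enc_ctx = ofmt_ctx->streams[stream_index]->codec;

    if (!(enc_ctx->codec->capabilities & CODEC_CAP_DELAY))
        return 0;
    while (1) {
        enc_pkt.data = NULL;
        enc_pkt.size = 0;
        av_init_packet(&enc_pkt);
        //A NULL frame drains the encoder's internal buffer
        ret = avcodec_encode_audio2(enc_ctx, &enc_pkt, NULL, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
        enc_pkt.stream_index = stream_index;
        //A complete version would also derive pts/dts from nb_samples here
        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
        if (ret < 0)
            break;
    }
    return ret;
}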
Example #6
File: qsvdec.c  Project: AVLeo/libav
static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
                      AVFrame *frame, int *got_frame,
                      AVPacket *avpkt)
{
    QSVFrame *out_frame;
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint *sync;
    mfxBitstream bs = { { { 0 } } };
    int ret;

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }

    sync = av_mallocz(sizeof(*sync));
    if (!sync) {
        av_freep(&sync);
        return AVERROR(ENOMEM);
    }

    do {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0)
            return ret;

        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                              insurf, &outsurf, sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);

    } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        av_log(avctx, AV_LOG_ERROR, "Error during QSV decoding.\n");
        av_freep(&sync);
        return ff_qsv_error(ret);
    }

    /* make sure we do not enter an infinite loop if the SDK
     * did not consume any data and did not return anything */
    if (!*sync && !bs.DataOffset) {
        av_log(avctx, AV_LOG_WARNING, "A decode call did not consume any data\n");
        bs.DataOffset = avpkt->size;
    }

    if (*sync) {
        QSVFrame *out_frame = find_frame(q, outsurf);

        if (!out_frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "The returned surface does not correspond to any frame\n");
            av_freep(&sync);
            return AVERROR_BUG;
        }

        out_frame->queued = 1;
        av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,      sizeof(sync),      NULL);
    } else {
        av_freep(&sync);
    }

    if (!av_fifo_space(q->async_fifo) ||
        (!avpkt->size && av_fifo_size(q->async_fifo))) {
        AVFrame *src_frame;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);
        out_frame->queued = 0;

        do {
            ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
        } while (ret == MFX_WRN_IN_EXECUTION);

        av_freep(&sync);

        src_frame = out_frame->frame;

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        outsurf = out_frame->surface;

        frame->pkt_pts = frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);

        *got_frame = 1;
    }

    return bs.DataOffset;
}
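
Both QSV decode variants call get_surface() to obtain an input surface the SDK may write into. A rough sketch of its assumed behavior (reuse a work frame that is neither queued in the async FIFO nor locked by the SDK; otherwise allocate a new one):

//Rough sketch of get_surface(); allocation of new QSVFrame nodes is elided
static int get_surface(AVCodecContext *avctx, QSVContext *q,
                       mfxFrameSurface1 **surf)
{
    QSVFrame *frame = q->work_frames;

    while (frame) {
        if (!frame->queued && !frame->surface->Data.Locked) {
            *surf = frame->surface;
            return 0;
        }
        frame = frame->next;
    }
    //No reusable surface: a real implementation allocates a new QSVFrame,
    //links it into q->work_frames and initializes its AVFrame and surface.
    return AVERROR(ENOMEM);
}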
int main(int argc, char* argv[])
{
	AVOutputFormat *ofmt = NULL;
	//Input AVFormatContext and Output AVFormatContext
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
	AVPacket pkt;
	const char *in_filename, *out_filename;
	int ret, i;
	int videoindex=-1;
	int frame_index=0;
	int64_t start_time=0;
	//in_filename  = "cuc_ieschool.mov";
	//in_filename  = "cuc_ieschool.mkv";
	//in_filename  = "cuc_ieschool.ts";
	//in_filename  = "cuc_ieschool.mp4";
	//in_filename  = "cuc_ieschool.h264";
	in_filename  = "cuc_ieschool.flv";//输入URL(Input file URL)
	//in_filename  = "shanghai03_p.h264";
	
    out_filename = "rtmp://localhost/publishlive/livestream";//输出 URL(Output URL)[RTMP]
    out_filename = "http://192.168.0.53:10000";//输出 URL(Output URL)[RTMP]
	//out_filename = "rtp://233.233.233.233:6666";//输出 URL(Output URL)[UDP]

	av_register_all();
	//Network
	avformat_network_init();
	//Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf( "Could not open input file.");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf( "Failed to retrieve input stream information");
		goto end;
	}

	for(i=0; i<ifmt_ctx->nb_streams; i++) 
		if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}

	av_dump_format(ifmt_ctx, 0, in_filename, 0);

	//Output
	
	avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_filename); //RTMP
	//avformat_alloc_output_context2(&ofmt_ctx, NULL, "mpegts", out_filename);//UDP

	if (!ofmt_ctx) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt = ofmt_ctx->oformat;
	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
		if (!out_stream) {
			printf( "Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		//Copy the settings of AVCodecContext
		ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
		if (ret < 0) {
			printf( "Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	//Dump Format------------------
	av_dump_format(ofmt_ctx, 0, out_filename, 1);
	//Open output URL
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			printf( "Could not open output URL '%s'", out_filename);
			goto end;
		}
	}
	//Write file header
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0) {
		printf( "Error occurred when opening output URL\n");
		goto end;
	}

	start_time=av_gettime();
	while (1) {
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		ret = av_read_frame(ifmt_ctx, &pkt);
		if (ret < 0)
			break;
		//FIX:No PTS (Example: Raw H.264)
		//Simple Write PTS
		if(pkt.pts==AV_NOPTS_VALUE){
			//Write PTS
			AVRational time_base1=ifmt_ctx->streams[videoindex]->time_base;
			//Duration between 2 frames (us)
			int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(ifmt_ctx->streams[videoindex]->r_frame_rate);
			//Parameters
			pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
			pkt.dts=pkt.pts;
			pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
		}
		//Important:Delay
		if(pkt.stream_index==videoindex){
			AVRational time_base=ifmt_ctx->streams[videoindex]->time_base;
			AVRational time_base_q={1,AV_TIME_BASE};
			int64_t pts_time = av_rescale_q(pkt.dts, time_base, time_base_q);
			int64_t now_time = av_gettime() - start_time;
			if (pts_time > now_time)
				av_usleep(pts_time - now_time);

		}

		in_stream  = ifmt_ctx->streams[pkt.stream_index];
		out_stream = ofmt_ctx->streams[pkt.stream_index];
		/* copy packet */
		//Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;
		//Print to Screen
		if(pkt.stream_index==videoindex){
			printf("Send %8d video frames to output URL\n",frame_index);
			frame_index++;
		}
		//ret = av_write_frame(ofmt_ctx, &pkt);
		ret = av_interleaved_write_frame(ofmt_ctx, &pkt);

		if (ret < 0) {
            char err[1024] = { 0 };
            int nRet = av_strerror(ret, err, 1024);
			printf( "Error muxing packet\n");
			break;
		}
		
		av_free_packet(&pkt);
		
	}
	//Write file trailer
	av_write_trailer(ofmt_ctx);
end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	if (ret < 0 && ret != AVERROR_EOF) {
		printf( "Error occurred.\n");
		return -1;
	}
	return 0;
}
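
The "Important:Delay" block above is the core of pacing a file-based stream: packet timestamps are rescaled to microseconds and compared against the wall clock, sleeping until the stream catches up. Factored out as a helper, the same logic reads:

//The same pacing logic as above, factored into a helper for clarity
static void pace_to_dts(int64_t dts, AVRational stream_tb, int64_t start_time)
{
	AVRational time_base_q = { 1, AV_TIME_BASE };
	int64_t pts_time = av_rescale_q(dts, stream_tb, time_base_q); //stream ticks -> us
	int64_t now_time = av_gettime() - start_time;                 //us since start
	if (pts_time > now_time)
		av_usleep(pts_time - now_time);
}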
static int qsvscale_filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext             *ctx = link->dst;
    QSVScaleContext               *s = ctx->priv;
    AVFilterLink            *outlink = ctx->outputs[0];

    mfxSyncPoint sync = NULL;
    mfxStatus err;

    AVFrame *out = NULL;
    int ret = 0;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    do {
        err = MFXVideoVPP_RunFrameVPPAsync(s->session,
                                           (mfxFrameSurface1*)in->data[3],
                                           (mfxFrameSurface1*)out->data[3],
                                           NULL, &sync);
        if (err == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (err == MFX_WRN_DEVICE_BUSY);

    if (err < 0 || !sync) {
        av_log(ctx, AV_LOG_ERROR, "Error during scaling\n");
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    do {
        err = MFXVideoCORE_SyncOperation(s->session, sync, 1000);
    } while (err == MFX_WRN_IN_EXECUTION);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error synchronizing the operation: %d\n", err);
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    ret = av_frame_copy_props(out, in);
    if (ret < 0)
        goto fail;

    out->width  = outlink->w;
    out->height = outlink->h;

    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
fail:
    av_frame_free(&in);
    av_frame_free(&out);
    return ret;
}
Example #9
static void *demux_thread(void *opaque)
{
	struct ff_demuxer *demuxer = (struct ff_demuxer *) opaque;
	int result;

	struct ff_packet packet = {0};

	if (!open_input(demuxer, &demuxer->format_context))
		goto fail;

	av_dump_format(demuxer->format_context, 0, demuxer->input, 0);

	if (!find_and_initialize_stream_decoders(demuxer))
		goto fail;

	ff_demuxer_reset(demuxer);

	while (!demuxer->abort) {
		// failed to seek (looping?)
		if (!handle_seek(demuxer))
			break;

		if (ff_decoder_full(demuxer->audio_decoder) ||
		    ff_decoder_full(demuxer->video_decoder)) {
			av_usleep(10 * 1000); // 10ms
			continue;
		}

		result = av_read_frame(demuxer->format_context, &packet.base);
		if (result < 0) {
			bool eof = false;
			if (result == AVERROR_EOF) {
				eof = true;
			} else if (demuxer->format_context->pb != NULL) {
				AVIOContext *io_context =
						demuxer->format_context->pb;
				if (io_context->error == 0) {
					av_usleep(100 * 1000); // 100ms
					continue;
				} else {
					if (io_context->eof_reached != 0)
						eof = true;
				}
			}

			if (eof) {
				if (demuxer->options.is_looping) {
					seek_beginning(demuxer);
				} else {
					break;
				}
				continue;
			} else {
				av_log(NULL, AV_LOG_ERROR,
						"av_read_frame() failed: %s",
						av_err2str(result));
				break;
			}
		}

		if (ff_decoder_accept(demuxer->video_decoder, &packet))
			continue;
		else if (ff_decoder_accept(demuxer->audio_decoder, &packet))
			continue;
		else
			av_free_packet(&packet.base);
	}
	if (demuxer->audio_decoder != NULL)
		demuxer->audio_decoder->eof = true;
	if (demuxer->video_decoder != NULL)
		demuxer->video_decoder->eof = true;
fail:
	demuxer->abort = true;

	return NULL;
}
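
demux_thread() throttles on ff_decoder_full(), which is not shown here. A hypothetical sketch, assuming each decoder tracks how many bytes its packet queue holds against a configured cap (the field names are assumptions, not the real libff API):

//Hypothetical ff_decoder_full() sketch; the queue fields are assumptions
static bool ff_decoder_full(struct ff_decoder *decoder)
{
	if (decoder == NULL)
		return false;   //a missing decoder can never be "full"
	return decoder->packet_queue.total_size >= decoder->packet_queue_size;
}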
Example #10
static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
                                const AVFrame *src)
{
    QSVFramesContext   *s = ctx->internal->priv;
    mfxFrameSurface1   in = {{ 0 }};
    mfxFrameSurface1 *out = (mfxFrameSurface1*)dst->data[3];

    mfxSyncPoint sync = NULL;
    mfxStatus err;
    int ret = 0; //must be initialized: read in the while condition below

    while (!s->session_upload_init && !s->session_upload && !ret) {
#if HAVE_PTHREADS
        if (pthread_mutex_trylock(&s->session_lock) == 0) {
#endif
            if (!s->session_upload_init) {
                ret = qsv_init_internal_session(ctx, &s->session_upload, 1);
                if (s->session_upload)
                    s->session_upload_init = 1;
            }
#if HAVE_PTHREADS
            pthread_mutex_unlock(&s->session_lock);
            pthread_cond_signal(&s->session_cond);
        } else {
            pthread_mutex_lock(&s->session_lock);
            while (!s->session_upload_init && !s->session_upload) {
                pthread_cond_wait(&s->session_cond, &s->session_lock);
            }
            pthread_mutex_unlock(&s->session_lock);
        }
#endif
    }
    if (ret < 0)
        return ret;

    if (!s->session_upload) {
        if (s->child_frames_ref)
            return qsv_transfer_data_child(ctx, dst, src);

        av_log(ctx, AV_LOG_ERROR, "Surface upload not possible\n");
        return AVERROR(ENOSYS);
    }

    in.Info = out->Info;
    in.Data.PitchLow = src->linesize[0];
    in.Data.Y        = src->data[0];
    in.Data.U        = src->data[1];
    in.Data.V        = src->data[2];
    in.Data.A        = src->data[3];

    do {
        err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
        if (err == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (err == MFX_WRN_DEVICE_BUSY);

    if (err < 0 || !sync) {
        av_log(ctx, AV_LOG_ERROR, "Error uploading the surface\n");
        return AVERROR_UNKNOWN;
    }

    do {
        err = MFXVideoCORE_SyncOperation(s->session_upload, sync, 1000);
    } while (err == MFX_WRN_IN_EXECUTION);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error synchronizing the operation\n");
        return AVERROR_UNKNOWN;
    }

    return 0;
}
Example #11
File: dxva2.c  Project: tmm1/FFmpeg
int ff_dxva2_common_end_frame(AVCodecContext *avctx, AVFrame *frame,
                              const void *pp, unsigned pp_size,
                              const void *qm, unsigned qm_size,
                              int (*commit_bs_si)(AVCodecContext *,
                                                  DECODER_BUFFER_DESC *bs,
                                                  DECODER_BUFFER_DESC *slice))
{
    AVDXVAContext *ctx = avctx->hwaccel_context;
    unsigned               buffer_count = 0;
#if CONFIG_D3D11VA
    D3D11_VIDEO_DECODER_BUFFER_DESC buffer11[4];
#endif
#if CONFIG_DXVA2
    DXVA2_DecodeBufferDesc          buffer2[4];
#endif
    DECODER_BUFFER_DESC             *buffer = NULL, *buffer_slice = NULL;
    int result, runs = 0;
    HRESULT hr;
    unsigned type;

    do {
#if CONFIG_D3D11VA
        if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
            if (D3D11VA_CONTEXT(ctx)->context_mutex != INVALID_HANDLE_VALUE)
                WaitForSingleObjectEx(D3D11VA_CONTEXT(ctx)->context_mutex, INFINITE, FALSE);
            hr = ID3D11VideoContext_DecoderBeginFrame(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder,
                                                      ff_dxva2_get_surface(frame),
                                                      0, NULL);
        }
#endif
#if CONFIG_DXVA2
        if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
            hr = IDirectXVideoDecoder_BeginFrame(DXVA2_CONTEXT(ctx)->decoder,
                                                 ff_dxva2_get_surface(frame),
                                                 NULL);
#endif
        if (hr != E_PENDING || ++runs > 50)
            break;
#if CONFIG_D3D11VA
        if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD)
            if (D3D11VA_CONTEXT(ctx)->context_mutex != INVALID_HANDLE_VALUE)
                ReleaseMutex(D3D11VA_CONTEXT(ctx)->context_mutex);
#endif
        av_usleep(2000);
    } while(1);

    if (FAILED(hr)) {
        av_log(avctx, AV_LOG_ERROR, "Failed to begin frame: 0x%lx\n", hr);
#if CONFIG_D3D11VA
        if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD)
            if (D3D11VA_CONTEXT(ctx)->context_mutex != INVALID_HANDLE_VALUE)
                ReleaseMutex(D3D11VA_CONTEXT(ctx)->context_mutex);
#endif
        return -1;
    }

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
        buffer = &buffer11[buffer_count];
        type = D3D11_VIDEO_DECODER_BUFFER_PICTURE_PARAMETERS;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        buffer = &buffer2[buffer_count];
        type = DXVA2_PictureParametersBufferType;
    }
#endif
    result = ff_dxva2_commit_buffer(avctx, ctx, buffer,
                                    type,
                                    pp, pp_size, 0);
    if (result) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to add picture parameter buffer\n");
        goto end;
    }
    buffer_count++;

    if (qm_size > 0) {
#if CONFIG_D3D11VA
        if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
            buffer = &buffer11[buffer_count];
            type = D3D11_VIDEO_DECODER_BUFFER_INVERSE_QUANTIZATION_MATRIX;
        }
#endif
#if CONFIG_DXVA2
        if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
            buffer = &buffer2[buffer_count];
            type = DXVA2_InverseQuantizationMatrixBufferType;
        }
#endif
        result = ff_dxva2_commit_buffer(avctx, ctx, buffer,
                                        type,
                                        qm, qm_size, 0);
        if (result) {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to add inverse quantization matrix buffer\n");
            goto end;
        }
        buffer_count++;
    }

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
        buffer       = &buffer11[buffer_count + 0];
        buffer_slice = &buffer11[buffer_count + 1];
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        buffer       = &buffer2[buffer_count + 0];
        buffer_slice = &buffer2[buffer_count + 1];
    }
#endif

    result = commit_bs_si(avctx,
                          buffer,
                          buffer_slice);
    if (result) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to add bitstream or slice control buffer\n");
        goto end;
    }
    buffer_count += 2;

    /* TODO Film Grain when possible */

    assert(buffer_count == 1 + (qm_size > 0) + 2);

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD)
        hr = ID3D11VideoContext_SubmitDecoderBuffers(D3D11VA_CONTEXT(ctx)->video_context,
                                                     D3D11VA_CONTEXT(ctx)->decoder,
                                                     buffer_count, buffer11);
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeExecuteParams exec = {
            .NumCompBuffers     = buffer_count,
            .pCompressedBuffers = buffer2,
            .pExtensionData     = NULL,
        };
        hr = IDirectXVideoDecoder_Execute(DXVA2_CONTEXT(ctx)->decoder, &exec);
    }
Example #12
File: dxva2.c Project: jeeb/ffmpeg
int ff_dxva2_common_end_frame(AVCodecContext *avctx, MpegEncContext *s,
                              const void *pp, unsigned pp_size,
                              const void *qm, unsigned qm_size,
                              int (*commit_bs_si)(AVCodecContext *,
                                                  DXVA2_DecodeBufferDesc *bs,
                                                  DXVA2_DecodeBufferDesc *slice))
{
    struct dxva_context *ctx = avctx->hwaccel_context;
    unsigned               buffer_count = 0;
    DXVA2_DecodeBufferDesc buffer[4];
    DXVA2_DecodeExecuteParams exec = { 0 };
    int      result, trys = 0;
    HRESULT  hr;

again:
    if (FAILED(hr = IDirectXVideoDecoder_BeginFrame(ctx->decoder,
                                               ff_dxva2_get_surface(s->current_picture_ptr),
                                               NULL))) {
        if (hr == E_PENDING && trys < MAX_PENDING_RETRY) {
            av_log(avctx, AV_LOG_DEBUG, "Failed to begin frame, operation pending\n");
            trys++;
            av_usleep(1000);
            goto again;
        }
        av_log(avctx, AV_LOG_ERROR, "Failed to begin frame (hr: %lX)\n", hr);
        return -1;
    }

    result = ff_dxva2_commit_buffer(avctx, ctx, &buffer[buffer_count],
                                    DXVA2_PictureParametersBufferType,
                                    pp, pp_size, 0);
    if (result) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to add picture parameter buffer\n");
        goto end;
    }
    buffer_count++;

    if (qm_size > 0) {
        result = ff_dxva2_commit_buffer(avctx, ctx, &buffer[buffer_count],
                                        DXVA2_InverseQuantizationMatrixBufferType,
                                        qm, qm_size, 0);
        if (result) {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to add inverse quantization matrix buffer\n");
            goto end;
        }
        buffer_count++;
    }

    result = commit_bs_si(avctx,
                          &buffer[buffer_count + 0],
                          &buffer[buffer_count + 1]);
    if (result) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to add bitstream or slice control buffer\n");
        goto end;
    }
    buffer_count += 2;

    /* TODO Film Grain when possible */

    assert(buffer_count == 1 + (qm_size > 0) + 2);

    exec.NumCompBuffers      = buffer_count;
    exec.pCompressedBuffers  = buffer;
    exec.pExtensionData      = NULL;
    if (FAILED(IDirectXVideoDecoder_Execute(ctx->decoder, &exec))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to execute\n");
        result = -1;
    }

end:
    if (FAILED(IDirectXVideoDecoder_EndFrame(ctx->decoder, NULL))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to end frame\n");
        result = -1;
    }

    if (!result)
        ff_draw_horiz_band(s, 0, s->avctx->height);
    return result;
}
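Both DXVA2 examples poll BeginFrame while the driver reports E_PENDING, bounding the retries and sleeping between attempts. A hedged standalone reduction of that retry shape follows; try_begin_frame, E_PENDING_SIM, and the busy_calls counter are illustrative stand-ins, not Windows API.

#include <stdio.h>
#include <unistd.h>

#define E_PENDING_SIM     1  /* stands in for HRESULT E_PENDING */
#define MAX_PENDING_RETRY 50 /* same bound as the first example */

static int busy_calls = 3;

static int try_begin_frame(void)
{
    /* pretend the decoder is busy for the first few calls */
    return --busy_calls > 0 ? E_PENDING_SIM : 0;
}

int main(void)
{
    int hr, tries = 0;

    do {
        hr = try_begin_frame();
        if (hr != E_PENDING_SIM || ++tries > MAX_PENDING_RETRY)
            break;
        usleep(2000); /* av_usleep(2000) in the original */
    } while (1);

    printf("done after %d retries, hr=%d\n", tries, hr);
    return hr ? -1 : 0;
}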
Example #13
File: gdigrab.c Project: 26mansi/FFmpeg
/**
 * Grabs a frame from gdi (public device demuxer API).
 *
 * @param s1 Context from avformat core
 * @param pkt Packet holding the grabbed frame
 * @return frame size in bytes
 */
static int gdigrab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    struct gdigrab *gdigrab = s1->priv_data;

    HDC        dest_hdc   = gdigrab->dest_hdc;
    HDC        source_hdc = gdigrab->source_hdc;
    RECT       clip_rect  = gdigrab->clip_rect;
    AVRational time_base  = gdigrab->time_base;
    int64_t    time_frame = gdigrab->time_frame;

    BITMAPFILEHEADER bfh;
    int file_size = gdigrab->header_size + gdigrab->frame_size;

    int64_t curtime, delay;

    /* Calculate the time of the next frame */
    time_frame += INT64_C(1000000);

    /* Run Window message processing queue */
    if (gdigrab->show_region)
        gdigrab_region_wnd_update(s1, gdigrab);

    /* wait based on the frame rate */
    for (;;) {
        curtime = av_gettime();
        delay = time_frame * av_q2d(time_base) - curtime;
        if (delay <= 0) {
            if (delay < INT64_C(-1000000) * av_q2d(time_base)) {
                time_frame += INT64_C(1000000);
            }
            break;
        }
        if (s1->flags & AVFMT_FLAG_NONBLOCK) {
            return AVERROR(EAGAIN);
        } else {
            av_usleep(delay);
        }
    }

    if (av_new_packet(pkt, file_size) < 0)
        return AVERROR(ENOMEM);
    pkt->pts = curtime;

    /* Blit screen grab */
    if (!BitBlt(dest_hdc, 0, 0,
                clip_rect.right - clip_rect.left,
                clip_rect.bottom - clip_rect.top,
                source_hdc,
                clip_rect.left, clip_rect.top, SRCCOPY | CAPTUREBLT)) {
        WIN32_API_ERROR("Failed to capture image");
        return AVERROR(EIO);
    }
    if (gdigrab->draw_mouse)
        paint_mouse_pointer(s1, gdigrab);

    /* Copy bits to packet data */

    bfh.bfType = 0x4d42; /* "BM" in little-endian */
    bfh.bfSize = file_size;
    bfh.bfReserved1 = 0;
    bfh.bfReserved2 = 0;
    bfh.bfOffBits = gdigrab->header_size;

    memcpy(pkt->data, &bfh, sizeof(bfh));

    memcpy(pkt->data + sizeof(bfh), &gdigrab->bmi.bmiHeader, sizeof(gdigrab->bmi.bmiHeader));

    if (gdigrab->bmi.bmiHeader.biBitCount <= 8)
        GetDIBColorTable(dest_hdc, 0, 1 << gdigrab->bmi.bmiHeader.biBitCount,
                (RGBQUAD *) (pkt->data + sizeof(bfh) + sizeof(gdigrab->bmi.bmiHeader)));

    memcpy(pkt->data + gdigrab->header_size, gdigrab->buffer, gdigrab->frame_size);

    gdigrab->time_frame = time_frame;

    return gdigrab->header_size + gdigrab->frame_size;
}
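The pacing loop in gdigrab_read_packet keeps a running frame counter scaled by 1e6, so time_frame * av_q2d(time_base) is the target wall-clock time of the next grab in microseconds; the loop sleeps off the remaining delay, or returns EAGAIN in non-blocking mode. A hedged reduction of that arithmetic without libavutil (get_time_us is our helper, standing in for av_gettime()):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int64_t get_time_us(void) /* stands in for av_gettime() */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
    /* time_base = 1/25 (25 fps): each frame slot is 1e6 * 1/25 = 40000 us */
    const double tb = 1.0 / 25.0;
    int64_t time_frame = (int64_t)(get_time_us() / tb); /* frame units * 1e6 */
    int frame;

    for (frame = 0; frame < 5; frame++) {
        time_frame += INT64_C(1000000); /* advance one frame, as gdigrab does */
        int64_t delay = (int64_t)(time_frame * tb) - get_time_us();
        if (delay > 0)
            usleep(delay); /* av_usleep(delay) in the original */
        printf("frame %d grabbed at %lld us\n", frame, (long long)get_time_us());
    }
    return 0;
}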
Example #14
File: fwServer.cpp Project: huangyt/FLOW
int main(int argc, char **argv)
{
    /*
     int i;
     char    b[40];
     char    c[21];
     HANDLE hConsole;
     int k;
     
     #ifdef _DEBUG
     _CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
     #endif
     
     for(i = 0; i < sizeof(b); ++i )
     {
     b[i] = ' ';
     }
     b[255] = '\0';
     
     system("color 5");
     for(i = 0; i < 20; ++i) {
     c[i] = '>';
     c[i+1] = '\0';
     printf("Progress |%s%*s|\r",c,19-i,&"");
     Sleep(100);
     //printf("%s\r", b);
     }
     printf("\n");
     printf("sizeof(structa_t) = %d\n", sizeof(structa_t));
     printf("sizeof(structb_t) = %d\n", sizeof(structb_t));
     printf("sizeof(structc_t) = %d\n", sizeof(structc_t));
     printf("sizeof(structd_t) = %d\n", AV_TIME_BASE);
     
     hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
     
     // you can loop k higher to see more color choices
     for(k = 1; k < 255; k++)
     {
     // pick the colorattribute k you want
     SetConsoleTextAttribute(hConsole, k);
     printf("%d I want to be nice today!",k);
     }*/
    
    AVFormatContext             *in_ctx = NULL, *out_ctx = NULL;
    AVInputFormat               *file_iformat = NULL;
    AVOutputFormat              *out_fmt = NULL;
    AVFrame                     *frame = NULL, *frameRGB = NULL;
    AVStream                    *st = NULL;
    AVCodecContext              *codec_ctx = NULL, *pCodecCtx = NULL;
    AVCodec                     *codec = NULL, *pCodec = NULL;
    AVCodec                     dummy_codec = {0};
    AVPacket                    pkt, p;
    AVBitStreamFilterContext    *bsf = NULL;
    struct SwsContext           *sws_ctx = NULL;
    BOOL                        tom = TRUE;
    char                        b[1024];
    int                         err, i, ret, frameFinished, numBytes;
    const char                  *src_filename = "final.mp4";
    int64_t                     timestamp;
    uint8_t                     buf[128];
    uint8_t                     *buffer = NULL;
    int                         video_stream_idx = -1;
    int                         audio_stream_idx = -1;
    FILE*                       sdp_file;
    
#ifdef _DEBUG
    _CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
#endif
    
    /* register all formats and codecs */
    av_register_all();
    avformat_network_init();
    av_log_set_level(AV_LOG_DEBUG);

    /* open input file, and allocate format context */
    ret = avformat_open_input(&in_ctx, src_filename, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        PAUSE_EXIT(1);
    }
    in_ctx->flags |= AVFMT_FLAG_GENPTS;

    ret = avformat_find_stream_info(in_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", src_filename);
        avformat_close_input(&in_ctx);
        PAUSE_EXIT(1);
    }
    
    av_dump_format(in_ctx, 0, src_filename, 0);
    
    for (i = 0; i < in_ctx->nb_streams; i++) {
        AVStream        *st_ptr;
        AVCodecContext  *coctx_ptr;
        
        if (in_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
	  if (in_ctx->streams[i]->codec->codec_id == CODEC_ID_MPEG4) {
	      bsf = av_bitstream_filter_init("dump_extra");
	  } else if (in_ctx->streams[i]->codec->codec_id == CODEC_ID_H264) {
	      fprintf(stderr, "Found h264 Stream\n");
	      bsf = av_bitstream_filter_init("h264_mp4toannexb");
	  } else {
	      bsf = NULL;
	  }
	  pCodecCtx=in_ctx->streams[i]->codec;
	  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	  if(pCodec==NULL) {
	      fprintf(stderr, "Unsupported codec!\n");
	      return -1; // Codec not found
	  }
	  // Open codec
	  if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
	      return -1; // Could not open codec
	  
	  out_ctx = avformat_alloc_context();
	  out_fmt = av_guess_format("rtp", NULL, NULL);
	  if (!out_fmt) {
	      fprintf(stderr, "Unable for find the RTP format for output\n");
	      avformat_close_input(&in_ctx);
	      PAUSE_EXIT(1);
	  }
	  out_ctx->oformat = out_fmt;
	  //out_ctx->flags |= AVFMT_FLAG_NONBLOCK;
	  
	  st = avformat_new_stream(out_ctx, 0);
	  if (!st) {
	      fprintf(stderr, "Cannot allocate stream\n");
	      avformat_close_input(&in_ctx);
	      PAUSE_EXIT(1);
	  }
	  
	  dummy_codec.type = in_ctx->streams[i]->codec->codec_type;
	  codec_ctx = st->codec;
	  avcodec_get_context_defaults3(codec_ctx, &dummy_codec);
	  avcodec_open2(codec_ctx, NULL, NULL);
	  codec_ctx->codec_type = in_ctx->streams[i]->codec->codec_type;
	  
	  /* FIXME: global headers stuff... */
	  
	  snprintf(out_ctx->filename, sizeof(out_ctx->filename), "rtp://%s:%d", "127.0.0.1", 55444);
	  
	  /* open the UDP sockets for RTP and RTCP */
	  if (!software_streaming) {
	      printf("Distant Connection\n");
	      ret = avio_open(&out_ctx->pb, out_ctx->filename, AVIO_FLAG_WRITE);
	      if (ret < 0) {
		fprintf(stderr, "Cannot open '%s'\n", out_ctx->filename);
		avformat_close_input(&in_ctx);
		PAUSE_EXIT(1);
	      }
	  } else {
	      ret = avio_open_dyn_buf(&out_ctx->pb);
	      out_ctx->pb->max_packet_size = 1460;
	      printf("MAX packet size = %d\n",out_ctx->pb->max_packet_size);
	  }
	  st_ptr = in_ctx->streams[i];
	  coctx_ptr = st_ptr->codec;
	  
	  codec_ctx->codec_id = coctx_ptr->codec_id;
	  codec_ctx->codec_type = coctx_ptr->codec_type;
	  
	  if(!codec_ctx->codec_tag) {
	      codec_ctx->codec_tag = coctx_ptr->codec_tag;
	  }
	  codec_ctx->bit_rate = coctx_ptr->bit_rate;
	  printf("\n\n\n\nFIRE!!!!! %d %d\n\n\n\n", codec_ctx->profile, codec_ctx->level);
	  if(coctx_ptr->extradata_size) {
	      codec_ctx->extradata = (uint8_t*)av_malloc(coctx_ptr->extradata_size);
	      memcpy(codec_ctx->extradata, coctx_ptr->extradata, coctx_ptr->extradata_size);
	  } else {
	      codec_ctx->extradata = NULL;
	  }
	  
	  codec_ctx->extradata_size = coctx_ptr->extradata_size;
	  /* FIXME: ExtraData ??? */
	  if (codec_ctx->codec_id == CODEC_ID_H264) {
	      printf("BINGO\n");
	      extradata_convert(codec_ctx);
	  }
	  
	  if(out_ctx->oformat->flags & AVFMT_GLOBALHEADER)
	      codec_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
	  
	  if(av_q2d(coctx_ptr->time_base) > av_q2d(st_ptr->time_base) && av_q2d(st_ptr->time_base) < 1.0/1000) {
	      codec_ctx->time_base = coctx_ptr->time_base;
	  } else {
	      codec_ctx->time_base = st_ptr->time_base;
	  }
	  
	  switch(codec_ctx->codec_type) {
	      case AVMEDIA_TYPE_AUDIO:
		codec_ctx->sample_rate = coctx_ptr->sample_rate;
		codec_ctx->time_base.den = 1;
		codec_ctx->time_base.num = coctx_ptr->sample_rate;
		codec_ctx->channels = coctx_ptr->channels;
		codec_ctx->frame_size = coctx_ptr->frame_size;
		codec_ctx->block_align= coctx_ptr->block_align;
		
		break;
	      case AVMEDIA_TYPE_VIDEO:
		//printf("Pixel Format %d\n", coctx_ptr->pix_fmt);
		codec_ctx->pix_fmt = coctx_ptr->pix_fmt;
		codec_ctx->width = coctx_ptr->width;
		codec_ctx->height = coctx_ptr->height;
		codec_ctx->has_b_frames = coctx_ptr->has_b_frames;
		
		break;
	      default:
		fprintf(stderr, "Strange Codec Type %d\n", codec_ctx->codec_type);
		PAUSE_EXIT(1);
	  }
	  
	  
	  ret = avformat_write_header(out_ctx, NULL);
	  if (ret < 0) {
	      fprintf(stderr, "Cannot Initialize output stream %d\n", i);
	      //close_output(rtp_c->out_s[i]);
	      
	      continue;
	  }
	  av_dump_format(out_ctx, i, out_ctx->filename, 1);
        }
    }
    
    frame = avcodec_alloc_frame();
    frameRGB = avcodec_alloc_frame();
    
    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
			  pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    printf("Allocated %d", numBytes);
    
    sws_ctx = sws_getContext (
     pCodecCtx->width,
     pCodecCtx->height,
     pCodecCtx->pix_fmt,
     pCodecCtx->width,
     pCodecCtx->height,
     PIX_FMT_RGB24,
     SWS_BILINEAR,
     NULL,
     NULL,
     NULL
     );
    
    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)frameRGB, buffer, PIX_FMT_RGB24,
		 pCodecCtx->width, pCodecCtx->height);
    
    av_sdp_create(&out_ctx,1,b,1024);
    printf("SDP : \n%s", b);
    sdp_file = fopen("rtp.sdp","w");
    fprintf(sdp_file, "%s",b);
    fclose(sdp_file);

    i = 0;
    av_init_packet(&pkt);
    av_init_packet(&p);
    printf("Payload Size %d\n", *(uint8_t *)out_ctx->streams[0]->codec->extradata != 1);
    while (av_read_frame(in_ctx, &pkt) >= 0) {
        if (pkt.stream_index == 0) {
	  int res;
	  uint8_t *ptr;
	  uint16_t ptr16;
	  
	  if (avcodec_decode_video2(pCodecCtx, frame, &frameFinished, &pkt) < 0 ) {
	      fprintf(stderr, "Error decoding packet\n");
	  }
	   
	  /* if(frameFinished) {
	  // Convert the image from its native format to RGB
	  sws_scale
	  (
	  sws_ctx,
	  (uint8_t const * const *)frame->data,
	  frame->linesize,
	  0,
	  pCodecCtx->height,
	  frameRGB->data,
	  frameRGB->linesize
	  );
	   
	  // Save the frame to disk
	  if(++i<=5)
	      SaveFrame(frameRGB, pCodecCtx->width, pCodecCtx->height, i);
	  }*/
	  printf("PTS %lld DTS%lld\n",pkt.pts,pkt.dts);
	  printf("Got frame %s %d %s\n",STRING_BOOL(frameFinished), pkt.size, STRING_BOOL(pkt.flags & AV_PKT_FLAG_KEY));
	  //break;
	  /*ret = av_bitstream_filter_filter(bsf,
	   in_ctx->streams[pkt.stream_index]->codec,
	   NULL, &p.data, &p.size,
	   pkt.data, pkt.size, pkt.flags & AV_PKT_FLAG_KEY);
	   if(ret > 0) {
	   av_free_packet(&pkt);
	   p.destruct = av_destruct_packet;
	   } else if (ret < 0) {
	   fprintf(stderr, "%s failed for stream %d, codec %s: ",
	   bsf->filter->name,
	   pkt.stream_index,
	   in_ctx->streams[pkt.stream_index]->codec->codec->name);
	   fprintf(stderr, "%d\n", ret);
	   }
	   pkt = p;*/
	  
	  stream_convert(&pkt);
	  printf("pkt size %d %d\n",pkt.size, pkt.flags);
	  av_usleep(4000000);
	  
	  if (av_write_frame(out_ctx, &pkt) < 0)
	      printf("MITSOS eisai!!!!\n");
	  
	  int written_size = avio_close_dyn_buf(out_ctx->pb,&ptr);
	  printf("Written Size %d\n", written_size);
	  ((uint8_t*)&ptr16)[0] = *(ptr+2);
	  ((uint8_t*)&ptr16)[1] = *(ptr+3);
	  printf("CC adsasd%d\n", ptr16 );
	  printByte(ptr);
	  printByte(ptr+1);
	  //printf("Second Byte %d\n", *(ptr+1));
	  
	  parseStream(ptr, written_size);
	  
	  printf("Version %d\n",(*(ptr) & 0xC0) >> 6);
	  printf("Padding %d\n",(*(ptr) & 0x20) >  0);
	  printf("Ext %d\n",(*(ptr) & 0x10) >  0);
	  printf("CC %d\n",(*(ptr) & 0xF));
	  printf("Marker %d\n",(*(ptr+1) & 0x80) > 0);
	  printf("Type %u\n",(*(ptr+1)));
	  printf("Seq %d\n",(*((uint16_t*)((uint8_t*)ptr+2))));
	  ret = avio_open_dyn_buf(&out_ctx->pb);
	  out_ctx->pb->max_packet_size = 1514;
        }
        
        av_free_packet(&pkt);
    }
Example #15
File: kmsgrab.c Project: DeHackEd/FFmpeg
static int kmsgrab_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    KMSGrabContext *ctx = avctx->priv_data;
    drmModePlane *plane;
    drmModeFB *fb;
    AVDRMFrameDescriptor *desc;
    AVFrame *frame;
    int64_t now;
    int err, fd;

    now = av_gettime();
    if (ctx->frame_last) {
        int64_t delay;
        while (1) {
            delay = ctx->frame_last + ctx->frame_delay - now;
            if (delay <= 0)
                break;
            av_usleep(delay);
            now = av_gettime();
        }
    }
    ctx->frame_last = now;

    plane = drmModeGetPlane(ctx->hwctx->fd, ctx->plane_id);
    if (!plane) {
        av_log(avctx, AV_LOG_ERROR, "Failed to get plane "
               "%"PRIu32".\n", ctx->plane_id);
        return AVERROR(EIO);
    }
    if (!plane->fb_id) {
        av_log(avctx, AV_LOG_ERROR, "Plane %"PRIu32" no longer has "
               "an associated framebuffer.\n", ctx->plane_id);
        return AVERROR(EIO);
    }

    fb = drmModeGetFB(ctx->hwctx->fd, plane->fb_id);
    if (!fb) {
        av_log(avctx, AV_LOG_ERROR, "Failed to get framebuffer "
               "%"PRIu32".\n", plane->fb_id);
        return AVERROR(EIO);
    }
    if (fb->width != ctx->width || fb->height != ctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Plane %"PRIu32" framebuffer "
               "dimensions changed: now %"PRIu32"x%"PRIu32".\n",
               ctx->plane_id, fb->width, fb->height);
        return AVERROR(EIO);
    }
    if (!fb->handle) {
        av_log(avctx, AV_LOG_ERROR, "No handle set on framebuffer.\n");
        return AVERROR(EIO);
    }

    err = drmPrimeHandleToFD(ctx->hwctx->fd, fb->handle, O_RDONLY, &fd);
    if (err < 0) {
        err = errno;
        av_log(avctx, AV_LOG_ERROR, "Failed to get PRIME fd from "
               "framebuffer handle: %s.\n", strerror(errno));
        return AVERROR(err);
    }

    desc = av_mallocz(sizeof(*desc));
    if (!desc)
        return AVERROR(ENOMEM);

    *desc = (AVDRMFrameDescriptor) {
        .nb_objects = 1,
        .objects[0] = {
            .fd               = fd,
            .size             = fb->height * fb->pitch,
            .format_modifier  = ctx->drm_format_modifier,
        },
        .nb_layers = 1,
        .layers[0] = {
            .format           = ctx->drm_format,
            .nb_planes        = 1,
            .planes[0] = {
                .object_index = 0,
                .offset       = 0,
                .pitch        = fb->pitch,
            },
        },
    };
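The descriptor is filled in one shot with a C99 compound literal and designated initializers, which zeroes every field that is not named. A hedged standalone illustration of the idiom (frame_desc and object_desc are simplified look-alikes, not the real AVDRMFrameDescriptor layout):

#include <stdio.h>

typedef struct { int fd; long size; } object_desc;

typedef struct {
    int         nb_objects;
    object_desc objects[4];
} frame_desc;

int main(void)
{
    frame_desc d;

    /* Assign the whole struct at once; .objects[1..3] become all-zero. */
    d = (frame_desc) {
        .nb_objects = 1,
        .objects[0] = { .fd = 3, .size = 1920L * 1080 * 4 },
    };

    printf("objects=%d fd=%d size=%ld\n",
           d.nb_objects, d.objects[0].fd, d.objects[0].size);
    return 0;
}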
Example #16
File: qsvdec.c Project: Cxuef/FFmpeg-1
int ff_qsv_decode(AVCodecContext *avctx, QSVContext *q,
                  AVFrame *frame, int *got_frame,
                  AVPacket *avpkt)
{
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint sync;
    mfxBitstream bs = { { { 0 } } };
    int ret;

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }

    do {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0)
            return ret;

        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                              insurf, &outsurf, &sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);

    } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        av_log(avctx, AV_LOG_ERROR, "Error during QSV decoding.\n");
        return ff_qsv_error(ret);
    }

    if (sync) {
        AVFrame *src_frame;

        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        src_frame = find_frame(q, outsurf);
        if (!src_frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "The returned surface does not correspond to any frame\n");
            return AVERROR_BUG;
        }

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        frame->pkt_pts = frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);

        *got_frame = 1;
    }

    return bs.DataOffset;
}
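The PicStruct-to-repeat_pict mapping at the end of ff_qsv_decode counts extra display fields: a tripled frame adds 4 fields, a doubled frame 2, a single repeated field 1. A hedged standalone reduction (the PIC_* constants are our stand-ins, not the real MFX_PICSTRUCT_* names):

#include <stdio.h>

/* Illustrative stand-ins for the MFX_PICSTRUCT_* bits used above */
#define PIC_PROGRESSIVE     0x01
#define PIC_FIELD_REPEATED  0x10
#define PIC_FRAME_DOUBLING  0x20
#define PIC_FRAME_TRIPLING  0x40

/* repeat_pict counts extra fields to display per decoded frame */
static int repeat_pict_from_picstruct(unsigned ps)
{
    return ps & PIC_FRAME_TRIPLING ? 4 :
           ps & PIC_FRAME_DOUBLING ? 2 :
           ps & PIC_FIELD_REPEATED ? 1 : 0;
}

int main(void)
{
    unsigned ps = PIC_PROGRESSIVE | PIC_FRAME_DOUBLING;
    printf("repeat_pict=%d interlaced=%d\n",
           repeat_pict_from_picstruct(ps), !(ps & PIC_PROGRESSIVE));
    return 0;
}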
Example #17
File: hls.c Project: vriera/libav
static int read_data(void *opaque, uint8_t *buf, int buf_size)
{
    struct variant *v = opaque;
    HLSContext *c = v->parent->priv_data;
    int ret, i;

restart:
    if (!v->input) {
        /* If this is a live stream and the reload interval has elapsed since
         * the last playlist reload, reload the variant playlists now. */
        int64_t reload_interval = v->n_segments > 0 ?
                                  v->segments[v->n_segments - 1]->duration :
                                  v->target_duration;

reload:
        if (!v->finished &&
            av_gettime_relative() - v->last_load_time >= reload_interval) {
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                return ret;
            /* If we need to reload the playlist again below (if
             * there's still no more segments), switch to a reload
             * interval of half the target duration. */
            reload_interval = v->target_duration / 2;
        }
        if (v->cur_seq_no < v->start_seq_no) {
            av_log(NULL, AV_LOG_WARNING,
                   "skipping %d segments ahead, expired from playlists\n",
                   v->start_seq_no - v->cur_seq_no);
            v->cur_seq_no = v->start_seq_no;
        }
        if (v->cur_seq_no >= v->start_seq_no + v->n_segments) {
            if (v->finished)
                return AVERROR_EOF;
            while (av_gettime_relative() - v->last_load_time < reload_interval) {
                if (ff_check_interrupt(c->interrupt_callback))
                    return AVERROR_EXIT;
                av_usleep(100*1000);
            }
            /* Enough time has elapsed since the last reload */
            goto reload;
        }

        ret = open_input(v);
        if (ret < 0)
            return ret;
    }
    ret = ffurl_read(v->input, buf, buf_size);
    if (ret > 0)
        return ret;
    ffurl_close(v->input);
    v->input = NULL;
    v->cur_seq_no++;

    c->end_of_segment = 1;
    c->cur_seq_no = v->cur_seq_no;

    if (v->ctx && v->ctx->nb_streams &&
        v->parent->nb_streams >= v->stream_offset + v->ctx->nb_streams) {
        v->needed = 0;
        for (i = v->stream_offset; i < v->stream_offset + v->ctx->nb_streams;
             i++) {
            if (v->parent->streams[i]->discard < AVDISCARD_ALL)
                v->needed = 1;
        }
    }
    if (!v->needed) {
        av_log(v->parent, AV_LOG_INFO, "No longer receiving variant %d\n",
               v->index);
        return AVERROR_EOF;
    }
    goto restart;
}
Example #18
static int hls_read(URLContext *h, uint8_t *buf, int size)
{
    HLSContext *s = h->priv_data;
    const char *url;
    int ret;
    int64_t reload_interval;

start:
    if (s->seg_hd) {
        ret = ffurl_read(s->seg_hd, buf, size);
        if (ret > 0)
            return ret;
    }
    if (s->seg_hd) {
        ffurl_close(s->seg_hd);
        s->seg_hd = NULL;
        s->cur_seq_no++;
    }
    reload_interval = s->n_segments > 0 ?
                      s->segments[s->n_segments - 1]->duration :
                      s->target_duration;
retry:
    if (!s->finished) {
        int64_t now = av_gettime_relative();
        if (now - s->last_load_time >= reload_interval) {
            if ((ret = parse_playlist(h, s->playlisturl)) < 0)
                return ret;
            /* If we need to reload the playlist again below (if
             * there's still no more segments), switch to a reload
             * interval of half the target duration. */
            reload_interval = s->target_duration / 2;
        }
    }
    if (s->cur_seq_no < s->start_seq_no) {
        av_log(h, AV_LOG_WARNING,
               "skipping %d segments ahead, expired from playlist\n",
               s->start_seq_no - s->cur_seq_no);
        s->cur_seq_no = s->start_seq_no;
    }
    if (s->cur_seq_no - s->start_seq_no >= s->n_segments) {
        if (s->finished)
            return AVERROR_EOF;
        while (av_gettime_relative() - s->last_load_time < reload_interval) {
            if (ff_check_interrupt(&h->interrupt_callback))
                return AVERROR_EXIT;
            av_usleep(100*1000);
        }
        goto retry;
    }
    url = s->segments[s->cur_seq_no - s->start_seq_no]->url;
    av_log(h, AV_LOG_DEBUG, "opening %s\n", url);
    ret = ffurl_open_whitelist(&s->seg_hd, url, AVIO_FLAG_READ,
                               &h->interrupt_callback, NULL,
                               h->protocol_whitelist, h->protocol_blacklist, h);
    if (ret < 0) {
        if (ff_check_interrupt(&h->interrupt_callback))
            return AVERROR_EXIT;
        av_log(h, AV_LOG_WARNING, "Unable to open %s\n", url);
        s->cur_seq_no++;
        goto retry;
    }
    goto start;
}
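Both HLS readers pick the playlist reload interval the same way: use the duration of the last segment when one exists, fall back to the target duration, and drop to half the target duration while waiting for new segments to appear. A hedged reduction of just that policy (the microsecond durations below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

static int64_t reload_interval(int n_segments, int64_t last_seg_duration,
                               int64_t target_duration)
{
    /* same choice as hls.c: prefer the last segment's duration */
    return n_segments > 0 ? last_seg_duration : target_duration;
}

int main(void)
{
    int64_t target = 6000000; /* 6 s target duration */
    int64_t iv = reload_interval(3, 5000000, target);

    printf("first reload after %lld us\n", (long long)iv);

    /* After a reload that produced no new segments, both readers retry
     * at half the target duration to pick up the next one promptly. */
    iv = target / 2;
    printf("empty reloads retried every %lld us\n", (long long)iv);
    return 0;
}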
Example #19
int ff_qsv_encode(AVCodecContext *avctx, QSVEncContext *q,
                  AVPacket *pkt, const AVFrame *frame, int *got_packet)
{
    AVPacket new_pkt = { 0 };
    mfxBitstream *bs;

    mfxFrameSurface1 *surf = NULL;
    mfxSyncPoint sync      = NULL;
    int ret;

    if (frame) {
        ret = submit_frame(q, frame, &surf);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error submitting the frame for encoding.\n");
            return ret;
        }
    }

    ret = av_new_packet(&new_pkt, q->packet_size);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating the output packet\n");
        return ret;
    }

    bs = av_mallocz(sizeof(*bs));
    if (!bs) {
        av_packet_unref(&new_pkt);
        return AVERROR(ENOMEM);
    }
    bs->Data      = new_pkt.data;
    bs->MaxLength = new_pkt.size;

    do {
        ret = MFXVideoENCODE_EncodeFrameAsync(q->session, NULL, surf, bs, &sync);
        if (ret == MFX_WRN_DEVICE_BUSY) {
            av_usleep(500);
            continue;
        }
        break;
    } while ( 1 );

    if (ret < 0) {
        av_packet_unref(&new_pkt);
        av_freep(&bs);
        if (ret == MFX_ERR_MORE_DATA)
            return 0;
        av_log(avctx, AV_LOG_ERROR, "EncodeFrameAsync returned %d\n", ret);
        return ff_qsv_error(ret);
    }

    if (ret == MFX_WRN_INCOMPATIBLE_VIDEO_PARAM) {
        if (frame && frame->interlaced_frame) /* frame may be NULL when flushing */
            print_interlace_msg(avctx, q);
        else
            av_log(avctx, AV_LOG_WARNING,
                   "EncodeFrameAsync returned 'incompatible param' code\n");
    }
    if (sync) {
        av_fifo_generic_write(q->async_fifo, &new_pkt, sizeof(new_pkt), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,    sizeof(sync),    NULL);
        av_fifo_generic_write(q->async_fifo, &bs,      sizeof(bs),    NULL);
    } else {
        av_packet_unref(&new_pkt);
        av_freep(&bs);
    }

    if (!av_fifo_space(q->async_fifo) ||
        (!frame && av_fifo_size(q->async_fifo))) {
        av_fifo_generic_read(q->async_fifo, &new_pkt, sizeof(new_pkt), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,    sizeof(sync),    NULL);
        av_fifo_generic_read(q->async_fifo, &bs,      sizeof(bs),      NULL);

        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        new_pkt.dts  = av_rescale_q(bs->DecodeTimeStamp, (AVRational){1, 90000}, avctx->time_base);
        new_pkt.pts  = av_rescale_q(bs->TimeStamp,       (AVRational){1, 90000}, avctx->time_base);
        new_pkt.size = bs->DataLength;

        if (bs->FrameType & MFX_FRAMETYPE_IDR ||
            bs->FrameType & MFX_FRAMETYPE_xIDR)
            new_pkt.flags |= AV_PKT_FLAG_KEY;

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        if (bs->FrameType & MFX_FRAMETYPE_I || bs->FrameType & MFX_FRAMETYPE_xI)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        else if (bs->FrameType & MFX_FRAMETYPE_P || bs->FrameType & MFX_FRAMETYPE_xP)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        else if (bs->FrameType & MFX_FRAMETYPE_B || bs->FrameType & MFX_FRAMETYPE_xB)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        av_freep(&bs);

        if (pkt->data) {
            if (pkt->size < new_pkt.size) {
                av_log(avctx, AV_LOG_ERROR, "Submitted buffer not large enough: %d < %d\n",
                       pkt->size, new_pkt.size);
                av_packet_unref(&new_pkt);
                return AVERROR(EINVAL);
            }

            memcpy(pkt->data, new_pkt.data, new_pkt.size);
            pkt->size = new_pkt.size;

            ret = av_packet_copy_props(pkt, &new_pkt);
            av_packet_unref(&new_pkt);
            if (ret < 0)
                return ret;
        } else
            *pkt = new_pkt;

        *got_packet = 1;
    }

    return 0;
}
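ff_qsv_encode pipelines the hardware encoder: each submission pushes a (packet, sync point, bitstream) triple onto a FIFO, and a finished packet is drained only once the FIFO is full (steady state) or on flush (frame == NULL). A minimal standalone queue showing that drain policy; pipeline, push, pop, and DEPTH are ours, purely illustrative.

#include <stdio.h>

#define DEPTH 4 /* in-flight submissions before we start draining */

typedef struct {
    int job[DEPTH];
    int head, count;
} pipeline;

static void push(pipeline *p, int job)
{
    p->job[(p->head + p->count) % DEPTH] = job;
    p->count++;
}

static int pop(pipeline *p)
{
    int job = p->job[p->head];
    p->head = (p->head + 1) % DEPTH;
    p->count--;
    return job;
}

int main(void)
{
    pipeline p = { { 0 }, 0, 0 };
    int i;

    for (i = 1; i <= 6; i++) { /* submit six "frames" */
        push(&p, i);
        if (p.count == DEPTH)  /* drain only once the queue is full */
            printf("frame %d in, packet %d out\n", i, pop(&p));
    }
    while (p.count) /* flush: no more input, drain the rest */
        printf("flush, packet %d out\n", pop(&p));
    return 0;
}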
Example #20
int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
	AVDictionary *out_opts = NULL;
    AVPacket pkt;
    const char *fmt, *in_filename, *out_filename;
    int ret, i, cmp;
	int cnt = 0;

    if (argc < 4) {
        printf("usage: remuxing input output fmt [do_sff do_rate_emu]\n");
        return 1;
    }
	if(argc >= 5){
		do_sff = atoi(argv[4]);
	}
	if(argc >= 6){
		do_rate_emu = atoi(argv[5]);
	}

    in_filename  = argv[1];
    out_filename = argv[2];
	fmt = argv[3];

    av_register_all();
    avformat_network_init();

    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }

    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    avformat_alloc_output_context2(&ofmt_ctx, NULL, fmt, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    ofmt = ofmt_ctx->oformat;

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
		
		avpriv_set_pts_info(out_stream, in_stream->pts_wrap_bits, in_stream->time_base.num, in_stream->time_base.den);
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

	if(out_filename && strstr(out_filename, ".m3u8")){
		av_opt_set_int(ofmt_ctx, "hls_wrap",  6, AV_OPT_SEARCH_CHILDREN);
		av_opt_set_int(ofmt_ctx, "hls_list_size",  6, AV_OPT_SEARCH_CHILDREN); 
		av_opt_set(ofmt_ctx, "hls_time", "1.0", AV_OPT_SEARCH_CHILDREN);
	}

    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
		av_dict_set(&out_opts, "chunked_post", "0", 0);	
        ret = avio_open2(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE|AVIO_FLAG_NONBLOCK, NULL, &out_opts);

        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = ofmt_ctx->pb && do_sff ? sff_write_header(ofmt_ctx) : avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }
	if(ofmt_ctx->pb){
		avio_flush(ofmt_ctx->pb);
	}

    while (1) {
        AVStream *in_stream, *out_stream;
		ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
		
		i = pkt.stream_index;
		if(curr_dts[i] == AV_NOPTS_VALUE && pkt.dts != AV_NOPTS_VALUE){
			first_dts[i] = pkt.dts;
			start_time[i] = av_gettime_relative();
		}
		if(pkt.dts != AV_NOPTS_VALUE){	
			curr_dts[i] = pkt.dts; //us
		}

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
		
		ret = ofmt_ctx->pb && do_sff ? sff_write_packet(ofmt_ctx, &pkt) : av_interleaved_write_frame(ofmt_ctx, &pkt); 
		if(ofmt_ctx->pb){
			avio_flush(ofmt_ctx->pb);
		}
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
		++cnt;
		//printf("cnt %d\t", cnt);
		
		do{
			curr_time[i] = av_gettime_relative();
			cmp = av_compare_ts(curr_dts[i] - first_dts[i], in_stream->time_base, 
					curr_time[i] - start_time[i], AV_TIME_BASE_Q); 
			if(!do_rate_emu || cmp <= 0)break;
			
			av_usleep(10000);
		}while(cmp > 0);
		
    }
	
	ofmt_ctx->pb && do_sff ? sff_write_packet(ofmt_ctx, NULL) : av_write_trailer(ofmt_ctx); 
end:

    avformat_close_input(&ifmt_ctx);

    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE) && ofmt_ctx->pb){
        avio_close(ofmt_ctx->pb);
		av_dict_free(&out_opts);
	}
    avformat_free_context(ofmt_ctx);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }
	
	printf("end of remux\n");
    return 0;
}
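The do_rate_emu loop at the heart of Example #20 compares how far the stream's DTS has advanced (in the input stream's time_base) against elapsed wall-clock time and sleeps while the stream is ahead. A hedged reduction of that comparison without libavutil; note that av_compare_ts() does this in exact integer arithmetic, whereas the double math below is only an approximation for illustration.

#include <stdint.h>
#include <stdio.h>

/* Return 1 if dts_ticks (in time_base tb_num/tb_den seconds) lies further
 * along than elapsed_us wall-clock microseconds, i.e. we should sleep. */
static int stream_ahead(int64_t dts_ticks, int tb_num, int tb_den,
                        int64_t elapsed_us)
{
    double dts_us = (double)dts_ticks * tb_num / tb_den * 1000000.0;
    return dts_us > (double)elapsed_us;
}

int main(void)
{
    /* 90 kHz stream: 450000 ticks = 5 s of media, but only 3 s elapsed */
    int64_t dts = 450000, elapsed = 3000000;

    if (stream_ahead(dts, 1, 90000, elapsed))
        printf("stream ahead of real time: sleep before writing the next packet\n");
    return 0;
}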