Example 1
static int ff_libde265dec_decode(AVCodecContext *avctx,
                                 void *data, int *got_frame, AVPacket *avpkt)
{
    DE265Context *ctx = (DE265Context *) avctx->priv_data;
    AVFrame *picture = (AVFrame *) data;
    const struct de265_image *img;
    de265_error err;
    int ret;
    int64_t pts = AV_NOPTS_VALUE; // avoid reading uninitialized pts on empty (flush) packets
#if LIBDE265_NUMERIC_VERSION >= 0x00050000
    int more = 0;
#endif

    const uint8_t* src[4];
    int stride[4];

    if (avpkt->size > 0) {
        if (avpkt->pts != AV_NOPTS_VALUE) {
            pts = avpkt->pts;
        } else {
            pts = avctx->reordered_opaque;
        }

        // replace 4-byte length fields with NAL start codes
        uint8_t* avpkt_data = avpkt->data;
        uint8_t* avpkt_end = avpkt->data + avpkt->size;
        while (avpkt_data + 4 <= avpkt_end) {
            int nal_size = AV_RB32(avpkt_data);
#if LIBDE265_NUMERIC_VERSION < 0x00050000
            AV_WB32(avpkt_data, 0x00000001);
#else
            err = de265_push_NAL(ctx->decoder, avpkt_data + 4, nal_size, pts, NULL);
            if (err != DE265_OK) {
                const char *error = de265_get_error_text(err);
                av_log(avctx, AV_LOG_ERROR, "Failed to push data: %s\n", error);
                return AVERROR_INVALIDDATA;
            }
#endif
            avpkt_data += 4 + nal_size;
        }
#if LIBDE265_NUMERIC_VERSION >= 0x00050000
    } else {
        de265_flush_data(ctx->decoder);
#endif
    }

#if LIBDE265_NUMERIC_VERSION < 0x00050000
    // insert input packet PTS into sorted queue
    if (ctx->pts_queue_len < DE265_MAX_PTS_QUEUE) {
        int pos=0;
        while (pos < ctx->pts_queue_len &&
               ctx->pts_queue[pos] < pts) {
            pos++;
        }

        if (pos < ctx->pts_queue_len) {
            memmove(&ctx->pts_queue[pos+1], &ctx->pts_queue[pos],
                sizeof(int64_t) * (ctx->pts_queue_len - pos));
        }

        ctx->pts_queue[pos] = pts;
        ctx->pts_queue_len++;
        if (ctx->pts_queue_len > ctx->pts_min_queue_len) {
            ctx->pts_min_queue_len = ctx->pts_queue_len;
        }
    }

    err = de265_decode_data(ctx->decoder, avpkt->data, avpkt->size);
#else
    // decode as much as possible
    do {
        err = de265_decode(ctx->decoder, &more);
    } while (more && err == DE265_OK);
#endif

    switch (err) {
    case DE265_OK:
    case DE265_ERROR_IMAGE_BUFFER_FULL:
#if LIBDE265_NUMERIC_VERSION >= 0x00050000
    case DE265_ERROR_WAITING_FOR_INPUT_DATA:
#endif
        break;

    default:
        {
            const char *error  = de265_get_error_text(err);

            av_log(avctx, AV_LOG_ERROR, "Failed to decode frame: %s\n", error);
            return AVERROR_INVALIDDATA;
        }
    }

    if ((img = de265_get_next_picture(ctx->decoder)) != NULL) {
        int width;
        int height;
        if (de265_get_chroma_format(img) != de265_chroma_420) {
            av_log(avctx, AV_LOG_ERROR, "Unsupported output colorspace (%d)\n",
                   de265_get_chroma_format(img));
            return AVERROR_INVALIDDATA;
        }

        width  = de265_get_image_width(img,0);
        height = de265_get_image_height(img,0);
        if (width != avctx->width || height != avctx->height) {
            if (avctx->width != 0)
                av_log(avctx, AV_LOG_INFO, "dimension change! %dx%d -> %dx%d\n",
                       avctx->width, avctx->height, width, height);

            if (av_image_check_size(width, height, 0, avctx)) {
                return AVERROR_INVALIDDATA;
            }

            avcodec_set_dimensions(avctx, width, height);
        }
#if LIBDE265_NUMERIC_VERSION < 0x00050000
        if (ctx->pts_queue_len < ctx->pts_min_queue_len) {
            // fill pts queue to ensure reordering works
            return avpkt->size;
        }
#endif

        picture->width = avctx->width;
        picture->height = avctx->height;
        picture->format = avctx->pix_fmt;
        if ((ret = av_frame_get_buffer(picture, 32)) < 0) {
            return ret;
        }

        for (int i = 0; i < 4; i++) {
            src[i] = de265_get_image_plane(img, i, &stride[i]);
        }

        av_image_copy(picture->data, picture->linesize, src, stride,
                      avctx->pix_fmt, width, height);

        *got_frame = 1;

#if LIBDE265_NUMERIC_VERSION < 0x00050000
        // assign next PTS from queue
        if (ctx->pts_queue_len > 0) {
            picture->reordered_opaque = ctx->pts_queue[0];
            picture->pkt_pts = ctx->pts_queue[0];

            if (ctx->pts_queue_len>1) {
                memmove(&ctx->pts_queue[0], &ctx->pts_queue[1],
                    sizeof(int64_t) * (ctx->pts_queue_len-1));
            }

            ctx->pts_queue_len--;
        }
#else
        picture->reordered_opaque = de265_get_image_PTS(img);
        picture->pkt_pts = de265_get_image_PTS(img);
#endif
    }
    return avpkt->size;
}
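For context, a decode callback like the one above would have been registered with libavcodec through an AVCodec table. Below is a minimal sketch under the legacy (pre-send/receive) API; the init/close helper names are assumptions, not taken from the listing:

/* Hedged sketch: registration with the legacy AVCodec API. */
AVCodec ff_libde265_decoder = {
    .name           = "libde265",
    .long_name      = "libde265 H.265/HEVC decoder",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HEVC,
    .priv_data_size = sizeof(DE265Context),
    .init           = ff_libde265dec_ctx_init, /* assumed helper */
    .close          = ff_libde265dec_free,     /* assumed helper */
    .decode         = ff_libde265dec_decode,
    .capabilities   = CODEC_CAP_DELAY,
};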
int main(int argc, char* argv[])
{
    AVFormatContext *ifmt_ctx = NULL;
    AVFormatContext *ifmt_ctx_a = NULL;
    AVFormatContext *ofmt_ctx;
    AVInputFormat* ifmt;
    AVStream* video_st;
    AVStream* audio_st;
    AVCodecContext* pCodecCtx;
    AVCodecContext* pCodecCtx_a;
    AVCodec* pCodec;
    AVCodec* pCodec_a;
    AVPacket *dec_pkt, enc_pkt;
    AVPacket *dec_pkt_a, enc_pkt_a;
    AVFrame *pframe, *pFrameYUV;
    struct SwsContext *img_convert_ctx;
    struct SwrContext *aud_convert_ctx;

    char capture_name[80] = { 0 };
    char device_name[80] = { 0 };
    char device_name_a[80] = { 0 };
    int framecnt = 0;
    int nb_samples = 0;
    int videoindex;
    int audioindex;
    int i;
    int ret;
    HANDLE  hThread;

    const char* out_path = "rtmp://localhost/live/livestream";
    int dec_got_frame, enc_got_frame;
    int dec_got_frame_a, enc_got_frame_a;

    int64_t aud_next_pts = 0; //next audio pts in AV_TIME_BASE units (int64_t to avoid overflow)
    int64_t vid_next_pts = 0; //next video pts in AV_TIME_BASE units
    int encode_video = 1, encode_audio = 1;

    AVRational time_base_q = { 1, AV_TIME_BASE };

    av_register_all();
    //Register Device
    avdevice_register_all();
    avformat_network_init();
#if USEFILTER
    //Register Filter
    avfilter_register_all();
    buffersrc = avfilter_get_by_name("buffer");
    buffersink = avfilter_get_by_name("buffersink");
#endif

    //Show Dshow Device  
    show_dshow_device();

    printf("\nChoose video capture device: ");
    if (gets(capture_name) == 0)
    {
		printf("Error in gets()\n");
		return -1;
    }
    sprintf(device_name, "video=%s", capture_name);

	printf("\nChoose audio capture device: ");
	if (gets(capture_name) == 0)
	{
		printf("Error in gets()\n");
		return -1;
	}
	sprintf(device_name_a, "audio=%s", capture_name);

    //wchar_t *cam = L"video=Integrated Camera";
	//wchar_t *cam = L"video=YY伴侣";
	//char *device_name_utf8 = dup_wchar_to_utf8(cam);
    //wchar_t *cam_a = L"audio=麦克风阵列 (Realtek High Definition Audio)";
	//char *device_name_utf8_a = dup_wchar_to_utf8(cam_a);

	ifmt = av_find_input_format("dshow");
    // Set device params
    AVDictionary *device_param = 0;
	//If rtbufsize is not set, error messages will be printed to the console, but the
	//stream can usually still be watched or recorded correctly. Setting rtbufsize
	//suppresses those messages, but a larger rtbufsize adds latency.
    //av_dict_set(&device_param, "rtbufsize", "10M", 0);

    //Open the chosen video capture device
	if (avformat_open_input(&ifmt_ctx, device_name, ifmt, &device_param) != 0){
        printf("Couldn't open input video stream.\n");
        return -1;
    }
	//Open the chosen audio capture device
	if (avformat_open_input(&ifmt_ctx_a, device_name_a, ifmt, &device_param) != 0){
        printf("Couldn't open input audio stream.\n");
        return -1;
    }
    //input video initialize
    if (avformat_find_stream_info(ifmt_ctx, NULL) < 0)
    {
        printf("Couldn't find video stream information.(无法获取流信息)\n");
        return -1;
    }
    videoindex = -1;
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    {
        videoindex = i;
        break;
    }
    if (videoindex == -1)
    {
        printf("Couldn't find a video stream.(没有找到视频流)\n");
        return -1;
    }
    if (avcodec_open2(ifmt_ctx->streams[videoindex]->codec, avcodec_find_decoder(ifmt_ctx->streams[videoindex]->codec->codec_id), NULL) < 0)
    {
        printf("Could not open video codec.(无法打开解码器)\n");
        return -1;
    }
    //input audio initialize
    if (avformat_find_stream_info(ifmt_ctx_a, NULL) < 0)
    {
        printf("Couldn't find audio stream information.(无法获取流信息)\n");
        return -1;
    }
    audioindex = -1;
    for (i = 0; i < ifmt_ctx_a->nb_streams; i++)
    if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
    {
        audioindex = i;
        break;
    }
    if (audioindex == -1)
    {
        printf("Couldn't find an audio stream.\n");
        return -1;
    }
    if (avcodec_open2(ifmt_ctx_a->streams[audioindex]->codec, avcodec_find_decoder(ifmt_ctx_a->streams[audioindex]->codec->codec_id), NULL) < 0)
    {
        printf("Could not open audio codec.(无法打开解码器)\n");
        return -1;
    }

    //output initialize
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path);
    //output video encoder initialize
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pCodec){
        printf("Can not find output video encoder! (没有找到合适的编码器!)\n");
        return -1;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
    pCodecCtx->width = ifmt_ctx->streams[videoindex]->codec->width;
    pCodecCtx->height = ifmt_ctx->streams[videoindex]->codec->height;
    pCodecCtx->time_base.num = 1;
    pCodecCtx->time_base.den = 25;
    pCodecCtx->bit_rate = 300000;
    pCodecCtx->gop_size = 250;
    /* Some formats want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    //H264 codec param
    //pCodecCtx->me_range = 16;
    //pCodecCtx->max_qdiff = 4;
    //pCodecCtx->qcompress = 0.6;
    pCodecCtx->qmin = 10;
    pCodecCtx->qmax = 51;
    //Optional Param
    pCodecCtx->max_b_frames = 0;
    // Set H264 preset and tune
    AVDictionary *param = 0;
    av_dict_set(&param, "preset", "fast", 0);
    av_dict_set(&param, "tune", "zerolatency", 0);

    if (avcodec_open2(pCodecCtx, pCodec, &param) < 0){
        printf("Failed to open output video encoder! (编码器打开失败!)\n");
        return -1;
    }

    //Add a new stream to the output; must be called before avformat_write_header() when muxing
    video_st = avformat_new_stream(ofmt_ctx, pCodec);
    if (video_st == NULL){
        return -1;
    }
    video_st->time_base.num = 1;
    video_st->time_base.den = 25;
    video_st->codec = pCodecCtx;


    //output audio encoder initialize
    pCodec_a = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!pCodec_a){
        printf("Can not find output audio encoder! (没有找到合适的编码器!)\n");
        return -1;
    }
    pCodecCtx_a = avcodec_alloc_context3(pCodec_a);
    pCodecCtx_a->channels = 2;
    pCodecCtx_a->channel_layout = av_get_default_channel_layout(2);
	pCodecCtx_a->sample_rate = ifmt_ctx_a->streams[audioindex]->codec->sample_rate;
    pCodecCtx_a->sample_fmt = pCodec_a->sample_fmts[0];
    pCodecCtx_a->bit_rate = 32000;
    pCodecCtx_a->time_base.num = 1;
	pCodecCtx_a->time_base.den = pCodecCtx_a->sample_rate;
    /** Allow the use of the experimental AAC encoder */
    pCodecCtx_a->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    /* Some formats want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        pCodecCtx_a->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if (avcodec_open2(pCodecCtx_a, pCodec_a, NULL) < 0){
        printf("Failed to open output audio encoder!\n");
        return -1;
    }

    //Add a new stream to the output; must be called before avformat_write_header() when muxing
    audio_st = avformat_new_stream(ofmt_ctx, pCodec_a);
    if (audio_st == NULL){
        return -1;
    }
    audio_st->time_base.num = 1;
	audio_st->time_base.den = pCodecCtx_a->sample_rate;
    audio_st->codec = pCodecCtx_a;

    //Open output URL; must be done before avformat_write_header() when muxing
    if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output URL!\n");
        return -1;
    }

    //Show some Information
    av_dump_format(ofmt_ctx, 0, out_path, 1);

    //Write File Header
    avformat_write_header(ofmt_ctx, NULL);

    //prepare before decode and encode
    dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));

#if USEFILTER
#else
	//Camera data may arrive in RGB or another pixel format; convert it to YUV420P
    img_convert_ctx = sws_getContext(ifmt_ctx->streams[videoindex]->codec->width, ifmt_ctx->streams[videoindex]->codec->height,
        ifmt_ctx->streams[videoindex]->codec->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    
	// Initialize the resampler to be able to convert audio sample formats
	aud_convert_ctx = swr_alloc_set_opts(NULL,
		av_get_default_channel_layout(pCodecCtx_a->channels),
		pCodecCtx_a->sample_fmt,
		pCodecCtx_a->sample_rate,
		av_get_default_channel_layout(ifmt_ctx_a->streams[audioindex]->codec->channels),
		ifmt_ctx_a->streams[audioindex]->codec->sample_fmt,
		ifmt_ctx_a->streams[audioindex]->codec->sample_rate,
		0, NULL);
	
	/**
	* Perform a sanity check so that the number of converted samples is
	* not greater than the number of samples to be converted.
	* If the sample rates differ, this case has to be handled differently
	*/
	//av_assert0(pCodecCtx_a->sample_rate == ifmt_ctx_a->streams[audioindex]->codec->sample_rate);

	swr_init(aud_convert_ctx);

    
#endif
    //Initialize the buffer to store YUV frames to be encoded.
	pFrameYUV = av_frame_alloc();
    uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

	//Initialize the FIFO buffer to store audio samples to be encoded. 
    AVAudioFifo *fifo = NULL;
	fifo = av_audio_fifo_alloc(pCodecCtx_a->sample_fmt, pCodecCtx_a->channels, 1);

	//Initialize the buffer to store converted samples to be encoded.
	uint8_t **converted_input_samples = NULL;
	/**
	* Allocate as many pointers as there are audio channels.
	* Each pointer will later point to the audio samples of the corresponding
	* channels (although it may be NULL for interleaved formats).
	*/
	if (!(converted_input_samples = (uint8_t**)calloc(pCodecCtx_a->channels,
		sizeof(*converted_input_samples)))) {
		printf("Could not allocate converted input sample pointers\n");
		return AVERROR(ENOMEM);
	}


    printf("\n --------call started----------\n");
#if USEFILTER
    printf("\n Press differnet number for different filters:");
    printf("\n 1->Mirror");
    printf("\n 2->Add Watermark");
    printf("\n 3->Negate");
    printf("\n 4->Draw Edge");
    printf("\n 5->Split Into 4");
    printf("\n 6->Vintage");
    printf("\n Press 0 to remove filter\n");
#endif
    printf("\nPress enter to stop...\n");
    hThread = CreateThread(
        NULL,                   // default security attributes
        0,                      // use default stack size  
        MyThreadFunction,       // thread function name
        NULL,          // argument to thread function 
        0,                      // use default creation flags 
        NULL);   // returns the thread identifier 

    //start decode and encode
    int64_t start_time = av_gettime();
    while (encode_video || encode_audio)
    {
        if (encode_video &&
			(!encode_audio || av_compare_ts(vid_next_pts, time_base_q,
			aud_next_pts, time_base_q) <= 0))
        {
            if ((ret=av_read_frame(ifmt_ctx, dec_pkt)) >= 0){

                if (exit_thread)
                    break;

                av_log(NULL, AV_LOG_DEBUG, "Going to reencode the frame\n");
                pframe = av_frame_alloc();
                if (!pframe) {
                    ret = AVERROR(ENOMEM);
                    return ret;
                }
                ret = avcodec_decode_video2(ifmt_ctx->streams[dec_pkt->stream_index]->codec, pframe,
                    &dec_got_frame, dec_pkt);
                if (ret < 0) {
                    av_frame_free(&pframe);
                    av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                    break;
                }
                if (dec_got_frame){
#if USEFILTER
                    pframe->pts = av_frame_get_best_effort_timestamp(pframe);

                    if (filter_change)
                        apply_filters(ifmt_ctx);
                    filter_change = 0;
                    /* push the decoded frame into the filtergraph */
                    if (av_buffersrc_add_frame(buffersrc_ctx, pframe) < 0) {
                        printf("Error while feeding the filtergraph\n");
                        break;
                    }
                    picref = av_frame_alloc();

                    /* pull filtered pictures from the filtergraph */
                    while (1) {
                        ret = av_buffersink_get_frame_flags(buffersink_ctx, picref, 0);
                        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                            break;
                        if (ret < 0)
                            return ret;

                        if (picref) {
                            img_convert_ctx = sws_getContext(picref->width, picref->height, (AVPixelFormat)picref->format, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
                            sws_scale(img_convert_ctx, (const uint8_t* const*)picref->data, picref->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                            sws_freeContext(img_convert_ctx);
                            pFrameYUV->width = picref->width;
                            pFrameYUV->height = picref->height;
                            pFrameYUV->format = PIX_FMT_YUV420P;
#else
                    sws_scale(img_convert_ctx, (const uint8_t* const*)pframe->data, pframe->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                    pFrameYUV->width = pframe->width;
                    pFrameYUV->height = pframe->height;
                    pFrameYUV->format = PIX_FMT_YUV420P;
#endif					
                    enc_pkt.data = NULL;
                    enc_pkt.size = 0;
                    av_init_packet(&enc_pkt);
                    ret = avcodec_encode_video2(pCodecCtx, &enc_pkt, pFrameYUV, &enc_got_frame);
                    av_frame_free(&pframe);
                    if (enc_got_frame == 1){
                        //printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, enc_pkt.size);
                        framecnt++;
                        enc_pkt.stream_index = video_st->index;						

                        //Write PTS
						AVRational time_base = ofmt_ctx->streams[0]->time_base;//{ 1, 1000 };
                        AVRational r_framerate1 = ifmt_ctx->streams[videoindex]->r_frame_rate;//{ 50, 2 }; 
                        //Duration between 2 frames (us)
                        int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1)); //in AV_TIME_BASE (microsecond) units
                        //Parameters
                        //enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                        enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
                        enc_pkt.dts = enc_pkt.pts;
                        enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                        enc_pkt.pos = -1;
                        //printf("video pts : %d\n", enc_pkt.pts);

						vid_next_pts=framecnt*calc_duration; //general timebase

                        //Delay
						int64_t pts_time = av_rescale_q(enc_pkt.pts, time_base, time_base_q);
						int64_t now_time = av_gettime() - start_time;						
						if ((pts_time > now_time) && ((vid_next_pts + pts_time - now_time)<aud_next_pts))
							av_usleep(pts_time - now_time);
						
                        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
                        av_free_packet(&enc_pkt);
                    }
#if USEFILTER
                    av_frame_unref(picref);
                }
            }
#endif
        }
        else {
            av_frame_free(&pframe);
        }
        av_free_packet(dec_pkt);
    }
    else
		if (ret == AVERROR_EOF)
			encode_video = 0;
		else
		{
			printf("Could not read video frame\n");
			return ret;
		}
    }
    else
    {
        //audio transcoding here
        const int output_frame_size = pCodecCtx_a->frame_size;

		if (exit_thread)
			break;

        /**
        * Make sure that there is one frame worth of samples in the FIFO
        * buffer so that the encoder can do its work.
        * Since the decoder's and the encoder's frame sizes may differ, the
        * FIFO buffer must store as many input samples as are needed to make
        * up at least one frame worth of output samples.
        */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /**
            * Decode one frame worth of audio samples, convert it to the
            * output sample format and put it into the FIFO buffer.
            */
			AVFrame *input_frame = av_frame_alloc();
			if (!input_frame)
			{
				ret = AVERROR(ENOMEM);
				return ret;
			}			
			
			/** Decode one frame worth of audio samples. */
			/** Packet used for temporary storage. */
			AVPacket input_packet;
			av_init_packet(&input_packet);
			input_packet.data = NULL;
			input_packet.size = 0;
			
			/** Read one audio frame from the input file into a temporary packet. */
			if ((ret = av_read_frame(ifmt_ctx_a, &input_packet)) < 0) {
				/** If we are at the end of the file, flush the decoder below. */
				if (ret == AVERROR_EOF)
				{
					encode_audio = 0;
				}
				else
				{
					printf("Could not read audio frame\n");
					return ret;
				}					
			}

			/**
			* Decode the audio frame stored in the temporary packet.
			* The input audio stream decoder is used to do this.
			* If we are at the end of the file, pass an empty packet to the decoder
			* to flush it.
			*/
			if ((ret = avcodec_decode_audio4(ifmt_ctx_a->streams[audioindex]->codec, input_frame,
				&dec_got_frame_a, &input_packet)) < 0) {
				printf("Could not decode audio frame\n");
				return ret;
			}
			av_packet_unref(&input_packet);
			/** If there is decoded data, convert and store it */
			if (dec_got_frame_a) {
				/**
				* Allocate memory for the samples of all channels in one consecutive
				* block for convenience.
				*/
				if ((ret = av_samples_alloc(converted_input_samples, NULL,
					pCodecCtx_a->channels,
					input_frame->nb_samples,
					pCodecCtx_a->sample_fmt, 0)) < 0) {
					printf("Could not allocate converted input samples\n");
					av_freep(&converted_input_samples[0]);
					return ret;
				}

				/**
				* Convert the input samples to the desired output sample format.
				* This requires a temporary storage provided by converted_input_samples.
				*/
				/** Convert the samples using the resampler. */
				if ((ret = swr_convert(aud_convert_ctx,
					converted_input_samples, input_frame->nb_samples,
					(const uint8_t**)input_frame->extended_data, input_frame->nb_samples)) < 0) {
					printf("Could not convert input samples\n");
					return ret;
				}

				/** Add the converted input samples to the FIFO buffer for later processing. */
				/**
				* Make the FIFO as large as it needs to be to hold both,
				* the old and the new samples.
				*/
				if ((ret = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + input_frame->nb_samples)) < 0) {
					printf("Could not reallocate FIFO\n");
					return ret;
				}

				/** Store the new samples in the FIFO buffer. */
				if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
					input_frame->nb_samples) < input_frame->nb_samples) {
					printf("Could not write data to FIFO\n");
					return AVERROR_EXIT;
				}				
			}
			av_frame_free(&input_frame); //release the per-iteration frame to avoid leaking it
        }

        /**
        * If we have enough samples for the encoder, we encode them.
        * At the end of the file, we pass the remaining samples to
        * the encoder.
        */
        if (av_audio_fifo_size(fifo) >= output_frame_size)
        {
            /**
            * Take one frame worth of audio samples from the FIFO buffer,
            * encode it and write it to the output file.
            */
            /** Temporary storage of the output samples of the frame written to the file. */
			AVFrame *output_frame=av_frame_alloc();
			if (!output_frame)
			{
				ret = AVERROR(ENOMEM);
				return ret;
			}
			/**
			* Use the maximum number of possible samples per frame.
			* If there is less than the maximum possible frame size in the FIFO
			* buffer use this number. Otherwise, use the maximum possible frame size
			*/
			const int frame_size = FFMIN(av_audio_fifo_size(fifo),
				pCodecCtx_a->frame_size);
			
			/** Initialize temporary storage for one output frame. */
			/**
			* Set the frame's parameters, especially its size and format.
			* av_frame_get_buffer needs this to allocate memory for the
			* audio samples of the frame.
			* Default channel layouts based on the number of channels
			* are assumed for simplicity.
			*/
			output_frame->nb_samples = frame_size;
			output_frame->channel_layout = pCodecCtx_a->channel_layout;
			output_frame->format = pCodecCtx_a->sample_fmt;
			output_frame->sample_rate = pCodecCtx_a->sample_rate;

			/**
			* Allocate the samples of the created frame. This call will make
			* sure that the audio frame can hold as many samples as specified.
			*/
			if ((ret = av_frame_get_buffer(output_frame, 0)) < 0) {
				printf("Could not allocate output frame samples\n");
				av_frame_free(&output_frame);
				return ret;
			}
			
			/**
			* Read as many samples from the FIFO buffer as required to fill the frame.
			* The samples are stored in the frame temporarily.
			*/
			if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
				printf("Could not read data from FIFO\n");
				return AVERROR_EXIT;
			}

			/** Encode one frame worth of audio samples. */
			/** Packet used for temporary storage. */
			AVPacket output_packet;
			av_init_packet(&output_packet);
			output_packet.data = NULL;
			output_packet.size = 0;
			
			/** Set a timestamp based on the sample rate for the container. */
			if (output_frame) {
				nb_samples += output_frame->nb_samples;
			}

			/**
			* Encode the audio frame and store it in the temporary packet.
			* The output audio stream encoder is used to do this.
			*/
			if ((ret = avcodec_encode_audio2(pCodecCtx_a, &output_packet,
				output_frame, &enc_got_frame_a)) < 0) {
				printf("Could not encode frame\n");
				av_packet_unref(&output_packet);
				return ret;
			}

			/** Write one audio frame from the temporary packet to the output file. */
			if (enc_got_frame_a) {

				output_packet.stream_index = 1;

				AVRational time_base = ofmt_ctx->streams[1]->time_base;
				AVRational r_framerate1 = { ifmt_ctx_a->streams[audioindex]->codec->sample_rate, 1 };// { 44100, 1};  
				int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));  //in AV_TIME_BASE (microsecond) units

				output_packet.pts = av_rescale_q(nb_samples*calc_duration, time_base_q, time_base);
				output_packet.dts = output_packet.pts;
				output_packet.duration = output_frame->nb_samples;

				//printf("audio pts : %d\n", output_packet.pts);
				aud_next_pts = nb_samples*calc_duration;

				int64_t pts_time = av_rescale_q(output_packet.pts, time_base, time_base_q);
				int64_t now_time = av_gettime() - start_time;
				if ((pts_time > now_time) && ((aud_next_pts + pts_time - now_time)<vid_next_pts))
					av_usleep(pts_time - now_time);

				if ((ret = av_interleaved_write_frame(ofmt_ctx, &output_packet)) < 0) {
					printf("Could not write frame\n");
					av_packet_unref(&output_packet);
					return ret;
				}

				av_packet_unref(&output_packet);
			}			
			av_frame_free(&output_frame);		
        }
    } //end else (audio branch)
    } //end while


    //Flush Encoder
    ret = flush_encoder(ifmt_ctx, ofmt_ctx, 0, framecnt);
    if (ret < 0) {
        printf("Flushing encoder failed\n");
        return -1;
    }
	ret = flush_encoder_a(ifmt_ctx_a, ofmt_ctx, 1, nb_samples);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		return -1;
	}



    //Write file trailer
    av_write_trailer(ofmt_ctx);

cleanup:
    //Clean
#if USEFILTER
    if (filter_graph)
        avfilter_graph_free(&filter_graph);
#endif
    if (video_st)
        avcodec_close(video_st->codec);
    if (audio_st)
        avcodec_close(audio_st->codec);
    av_free(out_buffer);
	if (converted_input_samples) {
		av_freep(&converted_input_samples[0]);
		free(converted_input_samples);
	}
	if (fifo)
		av_audio_fifo_free(fifo);
    avio_close(ofmt_ctx->pb);
    avformat_close_input(&ifmt_ctx);
    avformat_close_input(&ifmt_ctx_a);
    avformat_free_context(ofmt_ctx);
    CloseHandle(hThread);
    return 0;
}
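The flush_encoder() and flush_encoder_a() helpers called above are not shown in the listing. Below is a minimal sketch of the video variant, assuming the same legacy encode API used throughout this example; timestamp handling is elided and the real helper may differ:

//Hedged sketch of a flush_encoder() helper: drain delayed frames from the
//legacy encoder by feeding it NULL frames until it stops producing packets.
int flush_encoder(AVFormatContext *ifmt_ctx, AVFormatContext *ofmt_ctx,
                  unsigned int stream_index, int framecnt)
{
    int ret;
    int got_frame;
    AVPacket enc_pkt;
    if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
          CODEC_CAP_DELAY))
        return 0;
    while (1) {
        enc_pkt.data = NULL;
        enc_pkt.size = 0;
        av_init_packet(&enc_pkt);
        ret = avcodec_encode_video2(ofmt_ctx->streams[stream_index]->codec,
                                    &enc_pkt, NULL, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame) {
            ret = 0;
            break;
        }
        //Timestamps would be set here the same way as in the main loop.
        enc_pkt.stream_index = stream_index;
        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
        if (ret < 0)
            break;
    }
    return ret;
}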
Example 3
void CAudioEncoder::AddAudioStream(int bitrate)
{
	if (m_audioCodec)
	{
		//nothing to do!
		return;
	}


	m_audioCodec = std::unique_ptr<AVCodec, std::function<void(AVCodec *)>>(avcodec_find_encoder(AV_CODEC_ID_AAC), [](AVCodec*) { /* static descriptor owned by libavcodec: must not be freed */ });
	if (!m_audioCodec)
	{
		fprintf(stderr, "Codec not found\n");
	}
	else
		printf("AAC codec found\n");

	auto pContext = m_pfileWriter->GetContext();
	m_audioStream = std::unique_ptr<AVStream, std::function<void(AVStream *)>>(avformat_new_stream(pContext, m_audioCodec.get()), [](AVStream*) { /* owned and freed by the AVFormatContext */ });
	m_audioStream->id = pContext->nb_streams - 1;

	AVCodecContext* cc = m_audioStream->codec;
	cc->sample_fmt = AV_SAMPLE_FMT_S16;
	cc->bit_rate = bitrate*1000;
	cc->sample_rate = 44100;
	cc->channel_layout = AV_CH_LAYOUT_STEREO;
	cc->channels = 2;

	if (pContext->oformat->flags & AVFMT_GLOBALHEADER)
		cc->flags |= CODEC_FLAG_GLOBAL_HEADER;

	// Open the encoder
	if (avcodec_open2(cc, m_audioCodec.get(), NULL) < 0)
	{
		fprintf(stderr, "Could not open codec\n");
		exit(1);
	}
	else
		printf("AAC codec opened\n");

	m_frame_size = m_audioStream->codec->frame_size;

	m_fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, 1);


	m_tempFrame = std::unique_ptr<AVFrame, std::function<void(AVFrame *)>>(av_frame_alloc(), [](AVFrame* ptr) {av_frame_free(&ptr); });
	
	m_tempFrame->nb_samples = m_frame_size;
	m_tempFrame->channel_layout = cc->channel_layout;
	m_tempFrame->format = cc->sample_fmt;
	m_tempFrame->sample_rate = cc->sample_rate;

	int error = 0;
	/**
	* Allocate the samples of the created frame. This call will make
	* sure that the audio frame can hold as many samples as specified.
	*/
	if ((error = av_frame_get_buffer(m_tempFrame.get(), 0)) < 0)
	{
		return ;
	}

}
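The FIFO and m_tempFrame prepared above are presumably drained by an encode step elsewhere in the class. A hypothetical sketch of such a method follows; the name EncodeAudio, the write path, and the omission of timestamping are assumptions, not part of the original class:

// Hypothetical companion method: buffer incoming interleaved S16 samples in
// the FIFO, then encode full frames of m_frame_size samples with the legacy API.
void CAudioEncoder::EncodeAudio(const uint8_t* samples, int nb_samples)
{
	void* data[1] = { const_cast<uint8_t*>(samples) }; // S16 is interleaved: one plane
	av_audio_fifo_write(m_fifo, data, nb_samples);

	while (av_audio_fifo_size(m_fifo) >= m_frame_size)
	{
		av_audio_fifo_read(m_fifo, (void**)m_tempFrame->data, m_frame_size);

		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.data = NULL;
		pkt.size = 0;

		int got_packet = 0;
		if (avcodec_encode_audio2(m_audioStream->codec, &pkt,
		                          m_tempFrame.get(), &got_packet) == 0 && got_packet)
		{
			pkt.stream_index = m_audioStream->index;
			// Timestamps omitted for brevity; a real implementation would set
			// pkt.pts before muxing.
			av_interleaved_write_frame(m_pfileWriter->GetContext(), &pkt);
			av_packet_unref(&pkt);
		}
	}
}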
static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VTContext  *vt = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;
    char codec_str[32];

    av_frame_unref(vt->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vt->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vt->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    case kCVPixelFormatType_32BGRA:           vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
#ifdef kCFCoreFoundationVersionNumber10_7
    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
#endif
    default:
        av_get_codec_tag_string(codec_str, sizeof(codec_str), s->codec_tag);
        av_log(NULL, AV_LOG_ERROR,
               "%s: Unsupported pixel format: %s\n", codec_str, videotoolbox_pixfmt);
        return AVERROR(ENOSYS);
    }

    vt->tmp_frame->width  = frame->width;
    vt->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vt->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vt->tmp_frame->data, vt->tmp_frame->linesize,
                  (const uint8_t **)data, linesize, vt->tmp_frame->format,
                  frame->width, frame->height);

    ret = av_frame_copy_props(vt->tmp_frame, frame);
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vt->tmp_frame);

    return 0;
}
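This retrieve_data callback follows the ffmpeg.c hwaccel pattern, where it is installed during hwaccel initialization. A rough sketch of that wiring, with names assumed from the ffmpeg_videotoolbox.c convention and error/uninit handling elided:

/* Hedged sketch: installing the retrieve_data callback at hwaccel init. */
int videotoolbox_init(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    VTContext  *vt  = av_mallocz(sizeof(*vt));
    if (!vt)
        return AVERROR(ENOMEM);
    vt->tmp_frame = av_frame_alloc();
    if (!vt->tmp_frame) {
        av_freep(&vt);
        return AVERROR(ENOMEM);
    }
    ist->hwaccel_ctx           = vt;
    ist->hwaccel_retrieve_data = videotoolbox_retrieve_data;
    return 0;
}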
static int run_test(AVCodec *enc, AVCodec *dec, AVCodecContext *enc_ctx,
                    AVCodecContext *dec_ctx)
{
    AVPacket enc_pkt;
    AVFrame *in_frame, *out_frame;
    uint8_t *raw_in = NULL, *raw_out = NULL;
    int in_offset = 0, out_offset = 0;
    int result = 0;
    int got_output = 0;
    int i = 0;
    int in_frame_bytes, out_frame_bytes;

    in_frame = av_frame_alloc();
    if (!in_frame) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate input frame\n");
        return AVERROR(ENOMEM);
    }

    in_frame->nb_samples = enc_ctx->frame_size;
    in_frame->format = enc_ctx->sample_fmt;
    in_frame->channel_layout = enc_ctx->channel_layout;
    if (av_frame_get_buffer(in_frame, 32) != 0) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate a buffer for input frame\n");
        return AVERROR(ENOMEM);
    }

    out_frame = av_frame_alloc();
    if (!out_frame) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate output frame\n");
        return AVERROR(ENOMEM);
    }

    raw_in = av_malloc(in_frame->linesize[0] * NUMBER_OF_FRAMES);
    if (!raw_in) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for raw_in\n");
        return AVERROR(ENOMEM);
    }

    raw_out = av_malloc(in_frame->linesize[0] * NUMBER_OF_FRAMES);
    if (!raw_out) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for raw_out\n");
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < NUMBER_OF_FRAMES; i++) {
        av_init_packet(&enc_pkt);
        enc_pkt.data = NULL;
        enc_pkt.size = 0;

        generate_raw_frame((uint16_t*)(in_frame->data[0]), i, enc_ctx->sample_rate,
                           enc_ctx->channels, enc_ctx->frame_size);
        in_frame_bytes = in_frame->nb_samples * av_frame_get_channels(in_frame) * sizeof(uint16_t);
        if (in_frame_bytes > in_frame->linesize[0]) {
            av_log(NULL, AV_LOG_ERROR, "Incorrect value of input frame linesize\n");
            return 1;
        }
        memcpy(raw_in + in_offset, in_frame->data[0], in_frame_bytes);
        in_offset += in_frame_bytes;
        result = avcodec_encode_audio2(enc_ctx, &enc_pkt, in_frame, &got_output);
        if (result < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error encoding audio frame\n");
            return result;
        }

        /* if we get an encoded packet, feed it straight to the decoder */
        if (got_output) {
            result = avcodec_decode_audio4(dec_ctx, out_frame, &got_output, &enc_pkt);
            if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding audio packet\n");
                return result;
            }

            if (got_output) {
                if (result != enc_pkt.size) {
                    av_log(NULL, AV_LOG_INFO, "Decoder consumed only part of a packet, it is allowed to do so -- need to update this test\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->nb_samples != out_frame->nb_samples) {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different number of samples\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->channel_layout != out_frame->channel_layout) {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different channel layout\n");
                    return AVERROR_UNKNOWN;
                }

                if (in_frame->format != out_frame->format) {
                    av_log(NULL, AV_LOG_ERROR, "Error frames before and after decoding has different sample format\n");
                    return AVERROR_UNKNOWN;
                }
                out_frame_bytes = out_frame->nb_samples * av_frame_get_channels(out_frame) * sizeof(uint16_t);
                if (out_frame_bytes > out_frame->linesize[0]) {
                    av_log(NULL, AV_LOG_ERROR, "Incorrect value of output frame linesize\n");
                    return 1;
                }
                memcpy(raw_out + out_offset, out_frame->data[0], out_frame_bytes);
                out_offset += out_frame_bytes;
            }
        }
        av_free_packet(&enc_pkt);
    }

    if (memcmp(raw_in, raw_out, out_frame_bytes * NUMBER_OF_FRAMES) != 0) {
        av_log(NULL, AV_LOG_ERROR, "Output differs\n");
        return 1;
    }

    av_log(NULL, AV_LOG_INFO, "OK\n");

    av_freep(&raw_in);
    av_freep(&raw_out);
    av_frame_free(&in_frame);
    av_frame_free(&out_frame);
    return 0;
}
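generate_raw_frame() is referenced above but not shown. One plausible minimal implementation, consistent with how the test consumes the data (16-bit samples, identical in every channel, deterministic per frame index); the real helper may differ:

/* Hedged sketch: fill one frame of interleaved 16-bit samples with a
 * deterministic tone so encoder input and decoder output can be compared
 * byte for byte. Assumes <math.h> is included for sin() and M_PI. */
static void generate_raw_frame(uint16_t *frame_data, int i, int sample_rate,
                               int channels, int frame_size)
{
    int j, k;
    for (j = 0; j < frame_size; j++) {
        int16_t sample = (int16_t)(sin((i * frame_size + j) *
                                       2 * M_PI * 440.0 / sample_rate) * 10000);
        for (k = 0; k < channels; k++)
            frame_data[j * channels + k] = sample;
    }
}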
Example 6
std::shared_ptr<AVFrame> make_av_video_frame(const core::const_frame& frame, const core::video_format_desc& format_desc)
{
    auto av_frame = alloc_frame();

    auto pix_desc = frame.pixel_format_desc();

    auto planes = pix_desc.planes;
    auto format = pix_desc.format;

    const auto sar = boost::rational<int>(format_desc.square_width, format_desc.square_height) /
                     boost::rational<int>(format_desc.width, format_desc.height);

    av_frame->sample_aspect_ratio = {sar.numerator(), sar.denominator()};
    av_frame->width               = format_desc.width;
    av_frame->height              = format_desc.height;

    switch (format) {
        case core::pixel_format::rgb:
            av_frame->format = AVPixelFormat::AV_PIX_FMT_RGB24;
            break;
        case core::pixel_format::bgr:
            av_frame->format = AVPixelFormat::AV_PIX_FMT_BGR24;
            break;
        case core::pixel_format::rgba:
            av_frame->format = AVPixelFormat::AV_PIX_FMT_RGBA;
            break;
        case core::pixel_format::argb:
            av_frame->format = AVPixelFormat::AV_PIX_FMT_ARGB;
            break;
        case core::pixel_format::bgra:
            av_frame->format = AVPixelFormat::AV_PIX_FMT_BGRA;
            break;
        case core::pixel_format::abgr:
            av_frame->format = AVPixelFormat::AV_PIX_FMT_ABGR;
            break;
        case core::pixel_format::gray:
            av_frame->format = AVPixelFormat::AV_PIX_FMT_GRAY8;
            break;
        case core::pixel_format::ycbcr: {
            int y_w = planes[0].width;
            int y_h = planes[0].height;
            int c_w = planes[1].width;
            int c_h = planes[1].height;

            if (c_h == y_h && c_w == y_w)
                av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV444P;
            else if (c_h == y_h && c_w * 2 == y_w)
                av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV422P;
            else if (c_h == y_h && c_w * 4 == y_w)
                av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV411P;
            else if (c_h * 2 == y_h && c_w * 2 == y_w)
                av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV420P;
            else if (c_h * 2 == y_h && c_w * 4 == y_w)
                av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV410P;

            break;
        }
        case core::pixel_format::ycbcra:
            av_frame->format = AVPixelFormat::AV_PIX_FMT_YUVA420P;
            break;
    }

    FF(av_frame_get_buffer(av_frame.get(), 32));

    // TODO (perf) Avoid extra memcpy.
    for (size_t n = 0; n < planes.size(); ++n) {
        // Copy row by row using each plane's own height: chroma planes of
        // subsampled formats are shorter than the luma plane.
        for (int y = 0; y < planes[n].height; ++y) {
            std::memcpy(av_frame->data[n] + y * av_frame->linesize[n],
                        frame.image_data(n).data() + y * planes[n].linesize,
                        planes[n].linesize);
        }
    }

    return av_frame;
}
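alloc_frame() and the FF() error-check macro are project helpers not shown in this listing. A plausible sketch of alloc_frame(), assuming FF() simply throws on negative FFmpeg return codes:

// Hedged sketch: wrap av_frame_alloc() so the frame is released with
// av_frame_free() when the last shared_ptr owner goes away.
std::shared_ptr<AVFrame> alloc_frame()
{
    auto frame = std::shared_ptr<AVFrame>(av_frame_alloc(),
                                          [](AVFrame* ptr) { av_frame_free(&ptr); });
    if (!frame)
        throw std::bad_alloc();
    return frame;
}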
Example 7
bool AVIDump::CreateFile()
{
  AVCodec* codec = nullptr;

  s_format_context = avformat_alloc_context();
  std::stringstream s_file_index_str;
  s_file_index_str << s_file_index;
  snprintf(s_format_context->filename, sizeof(s_format_context->filename), "%s",
           (File::GetUserPath(D_DUMPFRAMES_IDX) + "framedump" + s_file_index_str.str() + ".avi")
               .c_str());
  File::CreateFullPath(s_format_context->filename);

  // Ask to delete file
  if (File::Exists(s_format_context->filename))
  {
    if (SConfig::GetInstance().m_DumpFramesSilent ||
        AskYesNoT("Delete the existing file '%s'?", s_format_context->filename))
    {
      File::Delete(s_format_context->filename);
    }
    else
    {
      // Stop and cancel dumping the video
      return false;
    }
  }

  if (!(s_format_context->oformat = av_guess_format("avi", nullptr, nullptr)) ||
      !(s_stream = avformat_new_stream(s_format_context, codec)))
  {
    return false;
  }

  s_stream->codec->codec_id =
      g_Config.bUseFFV1 ? AV_CODEC_ID_FFV1 : s_format_context->oformat->video_codec;
  if (!g_Config.bUseFFV1)
    s_stream->codec->codec_tag =
        MKTAG('X', 'V', 'I', 'D');  // Force XVID FourCC for better compatibility
  s_stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
  s_stream->codec->bit_rate = 400000;
  s_stream->codec->width = s_width;
  s_stream->codec->height = s_height;
  s_stream->codec->time_base.num = 1;
  s_stream->codec->time_base.den = VideoInterface::GetTargetRefreshRate();
  s_stream->codec->gop_size = 12;
  s_stream->codec->pix_fmt = g_Config.bUseFFV1 ? AV_PIX_FMT_BGRA : AV_PIX_FMT_YUV420P;

  if (!(codec = avcodec_find_encoder(s_stream->codec->codec_id)) ||
      (avcodec_open2(s_stream->codec, codec, nullptr) < 0))
  {
    return false;
  }

  s_src_frame = av_frame_alloc();
  s_scaled_frame = av_frame_alloc();

  s_scaled_frame->format = s_stream->codec->pix_fmt;
  s_scaled_frame->width = s_width;
  s_scaled_frame->height = s_height;

#if LIBAVCODEC_VERSION_MAJOR >= 55
  if (av_frame_get_buffer(s_scaled_frame, 1))
    return false;
#else
  if (avcodec_default_get_buffer(s_stream->codec, s_scaled_frame))
    return false;
#endif

  NOTICE_LOG(VIDEO, "Opening file %s for dumping", s_format_context->filename);
  if (avio_open(&s_format_context->pb, s_format_context->filename, AVIO_FLAG_WRITE) < 0 ||
      avformat_write_header(s_format_context, nullptr))
  {
    WARN_LOG(VIDEO, "Could not open %s", s_format_context->filename);
    return false;
  }

  OSD::AddMessage(StringFromFormat("Dumping Frames to \"%s\" (%dx%d)", s_format_context->filename,
                                   s_width, s_height));

  return true;
}
Example 8
// ========================================================================== //
// Run.
// ========================================================================== //
void ACaptureThread::run() {
    _mutex.lock();
    const QString dev_fmt_name = _dev_fmt_name;
    _mutex.unlock();

    AVInputFormat *av_inp_fmt = NULL;
    if(!dev_fmt_name.isEmpty()) {
        av_inp_fmt = av_find_input_format(dev_fmt_name.toLatin1());
        if(!av_inp_fmt) {
            QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "video").warning()
                << qPrintable(ACaptureThread::tr("Unable to find input video" \
                    " format \"%1\"!").arg(dev_fmt_name));

            QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

            return;
        }
    }

    _mutex.lock();
    const QString dev_name = _dev_name;
    _mutex.unlock();

    AVFormatContext *av_fmt_ctx = NULL;
    if(avformat_open_input(&av_fmt_ctx, dev_name.toLatin1()
        , av_inp_fmt, NULL) < 0) {

        QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "video").warning()
            << qPrintable(ACaptureThread::tr("Unable to open video" \
                " device \"%1\"!").arg(dev_name));

        QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

        return;
    }

    if(avformat_find_stream_info(av_fmt_ctx, NULL) < 0) {
        avformat_close_input(&av_fmt_ctx);

        QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "video").warning()
            << qPrintable(ACaptureThread::tr("Unable to find video stream!"));

        QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

        return;
    }

    int vid_strm_idx = -1;
    for(int i = 0, n = av_fmt_ctx->nb_streams; i < n; ++i) {
        if(av_fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            vid_strm_idx = i; break;
        }
    }

    if(vid_strm_idx == -1) {
        avformat_close_input(&av_fmt_ctx);

        QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "video").warning()
            << qPrintable(ACaptureThread::tr("Unable to find video stream!"));

        QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

        return;
    }

    AVStream *av_vid_strm = av_fmt_ctx->streams[vid_strm_idx];

    AVCodec *av_dec = avcodec_find_decoder(av_vid_strm->codec->codec_id);
    if(!av_dec) {
        avformat_close_input(&av_fmt_ctx);

        QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "video").warning()
            << qPrintable(ACaptureThread::tr("Unable to find video decoder!"));

        QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

        return;
    }

    if(avcodec_open2(av_vid_strm->codec, av_dec, NULL) < 0) {
        avformat_close_input(&av_fmt_ctx);

        QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "video").warning()
            << qPrintable(ACaptureThread::tr("Unable to open video decoder!"));

        QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

        return;
    }

    AVCodecContext *av_dec_ctx = av_vid_strm->codec;

    SwsContext *av_sws_ctx
        = sws_getCachedContext(NULL
            , av_dec_ctx->width, av_dec_ctx->height, av_dec_ctx->pix_fmt
            , av_dec_ctx->width, av_dec_ctx->height, AV_PIX_FMT_RGB24
            , SWS_FAST_BILINEAR, NULL, NULL, NULL);

    if(!av_sws_ctx) {
        sws_freeContext(av_sws_ctx);
        avcodec_close(av_vid_strm->codec);
        avformat_close_input(&av_fmt_ctx);

        QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "video").warning()
            << qPrintable(ACaptureThread::tr("Unable to open cached context!"));

        QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

        return;
    }

    AVFrame *av_cap_frm = av_frame_alloc();
    av_cap_frm->format = AV_PIX_FMT_RGB24;
    av_cap_frm->width  = av_dec_ctx->width;
    av_cap_frm->height = av_dec_ctx->height;
    if(av_frame_get_buffer(av_cap_frm, 32) < 0) {
        av_frame_free(&av_cap_frm);
        sws_freeContext(av_sws_ctx);
        avcodec_close(av_vid_strm->codec);
        avformat_close_input(&av_fmt_ctx);

        QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "video").warning()
            << qPrintable(ACaptureThread::tr("Unable to open capture frame!"));

        QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

        return;
    }

    AVFrame *av_vid_frm = av_frame_alloc();

    _mutex.lock();
    const QString det_fname = _det_fname;
    _mutex.unlock();

    QTemporaryFile *dst_file = QTemporaryFile::createNativeFile(det_fname);
    if(!dst_file) {
        av_frame_free(&av_vid_frm);
        av_frame_free(&av_cap_frm);
        sws_freeContext(av_sws_ctx);
        avcodec_close(av_vid_strm->codec);
        avformat_close_input(&av_fmt_ctx);

        QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "video").warning()
            << qPrintable(ACaptureThread::tr("Can not create temporary file!"));

        QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

        return;
    }

    const QString dst_fname = QDir::toNativeSeparators(dst_file->fileName());

    cv::CascadeClassifier classifier;
    if(!classifier.load(dst_fname.toStdString())) {
        av_frame_free(&av_vid_frm);
        av_frame_free(&av_cap_frm);
        sws_freeContext(av_sws_ctx);
        avcodec_close(av_vid_strm->codec);
        avformat_close_input(&av_fmt_ctx);

        delete dst_file;

        QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "video").warning()
            << qPrintable(ACaptureThread::tr("Load classifier failed!"));

        QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

        return;
    }

    delete dst_file;

    cv::Mat acc_mat, src_mat
        = cv::Mat::zeros(av_dec_ctx->height, av_dec_ctx->width, CV_8UC3);

    AVPacket av_pkt;
    while(av_read_frame(av_fmt_ctx, &av_pkt) >= 0
        && !isInterruptionRequested()) {

        int pkt_rdy = -1;
        if(avcodec_decode_video2(av_dec_ctx, av_vid_frm
            , &pkt_rdy, &av_pkt) < 0) {

            av_free_packet(&av_pkt); // release the packet before bailing out

            QMessageLogger(__FILE__, __LINE__, Q_FUNC_INFO, "app").warning()
                << qPrintable(ACaptureThread::tr("Capturing failed!"));

            QMetaObject::invokeMethod(this, "failed", Qt::QueuedConnection);

            break;
        }

        if(!pkt_rdy) {av_free_packet(&av_pkt); continue;}

        sws_scale(av_sws_ctx
            , av_vid_frm->data, av_vid_frm->linesize, 0, av_vid_frm->height
            , av_cap_frm->data, av_cap_frm->linesize);

        for(int y = 0, rows = av_dec_ctx->height; y < rows; ++y) {
            for(int x = 0, cols = av_dec_ctx->width; x < cols; ++x) {
                cv::Vec3b &d = src_mat.at<cv::Vec3b>(y,x);
                d[0] = av_cap_frm->data[0][y*av_cap_frm->linesize[0]+x*3+0];
                d[1] = av_cap_frm->data[0][y*av_cap_frm->linesize[0]+x*3+1];
                d[2] = av_cap_frm->data[0][y*av_cap_frm->linesize[0]+x*3+2];
            }
        }

        av_free_packet(&av_pkt);

        cv::Mat gry_mat;
        cv::cvtColor(src_mat, gry_mat, cv::COLOR_RGB2GRAY);

        if(acc_mat.rows != gry_mat.rows || acc_mat.cols != gry_mat.cols)
            acc_mat = cv::Mat(gry_mat.size(), CV_64F);

        cv::accumulateWeighted(gry_mat, acc_mat, 0.75);
        cv::convertScaleAbs(acc_mat, gry_mat);

        _mutex.lock();
        const int det_min = _det_min;
        const int det_max = _det_max;
        _mutex.unlock();

        const int hrz_min = gry_mat.cols * det_min / 100;
        const int vrt_min = gry_mat.rows * det_min / 100;
        const int hrz_max = gry_mat.cols * det_max / 100;
        const int vrt_max = gry_mat.rows * det_max / 100;

        std::vector<cv::Rect> rois;
        classifier.detectMultiScale(gry_mat, rois, 1.1, 3, 0
            , cv::Size(hrz_min,vrt_min), cv::Size(hrz_max,vrt_max));

        if(isHiddenCapture()) {
            QMetaObject::invokeMethod(this, "captured", Qt::QueuedConnection);

        } else {
            QImage img(src_mat.data, src_mat.cols, src_mat.rows
                , src_mat.step, QImage::Format_RGB888);

            img = img.convertToFormat(QImage::Format_ARGB32_Premultiplied);

            QMetaObject::invokeMethod(this, "captured"
                , Qt::QueuedConnection, Q_ARG(QImage,img));
        }

        if(rois.size() > 0) {
            if(isHiddenDetect()) {
                const cv::Rect &roi = rois.at(0);
                QMetaObject::invokeMethod(this, "detected"
                    , Qt::QueuedConnection
                    , Q_ARG(QRect,QRect(roi.x,roi.y,roi.width,roi.height)));

            } else {
                cv::Mat roi_mat = src_mat(rois.at(0));

                QImage img(roi_mat.data, roi_mat.cols, roi_mat.rows
                    , roi_mat.step, QImage::Format_RGB888);

                img = img.convertToFormat(QImage::Format_ARGB32_Premultiplied);

                QMetaObject::invokeMethod(this, "detected"
                    , Qt::QueuedConnection, Q_ARG(QImage,img));
            }
        }
    }

    av_frame_free(&av_vid_frm);
    av_frame_free(&av_cap_frm);
    sws_freeContext(av_sws_ctx);
    avcodec_close(av_vid_strm->codec);
    avformat_close_input(&av_fmt_ctx);

    if(!isHiddenCapture()) {
        QMetaObject::invokeMethod(this, "captured"
            , Qt::QueuedConnection, Q_ARG(QImage,QImage()));
    }
}
Example 9
bool AVIDump::CreateVideoFile()
{
  const std::string& format = g_Config.sDumpFormat;

  const std::string dump_path = GetDumpPath(format);

  if (dump_path.empty())
    return false;

  File::CreateFullPath(dump_path);

  AVOutputFormat* output_format = av_guess_format(format.c_str(), dump_path.c_str(), nullptr);
  if (!output_format)
  {
    ERROR_LOG(VIDEO, "Invalid format %s", format.c_str());
    return false;
  }

  if (avformat_alloc_output_context2(&s_format_context, output_format, nullptr, dump_path.c_str()) <
      0)
  {
    ERROR_LOG(VIDEO, "Could not allocate output context");
    return false;
  }

  const std::string& codec_name = g_Config.bUseFFV1 ? "ffv1" : g_Config.sDumpCodec;

  AVCodecID codec_id = output_format->video_codec;

  if (!codec_name.empty())
  {
    const AVCodecDescriptor* codec_desc = avcodec_descriptor_get_by_name(codec_name.c_str());
    if (codec_desc)
      codec_id = codec_desc->id;
    else
      WARN_LOG(VIDEO, "Invalid codec %s", codec_name.c_str());
  }

  const AVCodec* codec = nullptr;

  if (!g_Config.sDumpEncoder.empty())
  {
    codec = avcodec_find_encoder_by_name(g_Config.sDumpEncoder.c_str());
    if (!codec)
      WARN_LOG(VIDEO, "Invalid encoder %s", g_Config.sDumpEncoder.c_str());
  }
  if (!codec)
    codec = avcodec_find_encoder(codec_id);

  s_codec_context = avcodec_alloc_context3(codec);
  if (!codec || !s_codec_context)
  {
    ERROR_LOG(VIDEO, "Could not find encoder or allocate codec context");
    return false;
  }

  // Force XVID FourCC for better compatibility
  if (codec->id == AV_CODEC_ID_MPEG4)
    s_codec_context->codec_tag = MKTAG('X', 'V', 'I', 'D');

  s_codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
  s_codec_context->bit_rate = g_Config.iBitrateKbps * 1000;
  s_codec_context->width = s_width;
  s_codec_context->height = s_height;
  s_codec_context->time_base.num = 1;
  s_codec_context->time_base.den = VideoInterface::GetTargetRefreshRate();
  s_codec_context->gop_size = 12;
  s_codec_context->pix_fmt = g_Config.bUseFFV1 ? AV_PIX_FMT_BGRA : AV_PIX_FMT_YUV420P;

  if (output_format->flags & AVFMT_GLOBALHEADER)
    s_codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

  if (avcodec_open2(s_codec_context, codec, nullptr) < 0)
  {
    ERROR_LOG(VIDEO, "Could not open codec");
    return false;
  }

  s_src_frame = av_frame_alloc();
  s_scaled_frame = av_frame_alloc();

  s_scaled_frame->format = s_codec_context->pix_fmt;
  s_scaled_frame->width = s_width;
  s_scaled_frame->height = s_height;

#if LIBAVCODEC_VERSION_MAJOR >= 55
  if (av_frame_get_buffer(s_scaled_frame, 1))
    return false;
#else
  if (avcodec_default_get_buffer(s_codec_context, s_scaled_frame))
    return false;
#endif

  s_stream = avformat_new_stream(s_format_context, codec);
  if (!s_stream || !AVStreamCopyContext(s_stream, s_codec_context))
  {
    ERROR_LOG(VIDEO, "Could not create stream");
    return false;
  }

  NOTICE_LOG(VIDEO, "Opening file %s for dumping", s_format_context->filename);
  if (avio_open(&s_format_context->pb, s_format_context->filename, AVIO_FLAG_WRITE) < 0 ||
      avformat_write_header(s_format_context, nullptr))
  {
    ERROR_LOG(VIDEO, "Could not open %s", s_format_context->filename);
    return false;
  }

  OSD::AddMessage(StringFromFormat("Dumping Frames to \"%s\" (%dx%d)", s_format_context->filename,
                                   s_width, s_height));

  return true;
}
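AVStreamCopyContext() is a project helper not shown in this listing. A sketch of what it plausibly does, copying the encoder settings into the stream with whichever API the libavcodec version provides:

// Hedged sketch: propagate codec context settings to the output stream.
static bool AVStreamCopyContext(AVStream* stream, AVCodecContext* codec_context)
{
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 48, 101)
  stream->time_base = codec_context->time_base;
  return avcodec_parameters_from_context(stream->codecpar, codec_context) >= 0;
#else
  return avcodec_copy_context(stream->codec, codec_context) >= 0;
#endif
}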
Example 10
int main(int argc, char **argv)
{
    const char *filename;
    const AVCodec *codec;
    AVCodecContext *c = NULL;
    AVFrame *frame;
    AVPacket *pkt;
    int i, j, k, ret;
    FILE *f;
    uint16_t *samples;
    float t, tincr;

    if (argc <= 1) {
        fprintf(stderr, "Usage: %s <output file>\n", argv[0]);
        return 0;
    }
    filename = argv[1];

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    if (!check_sample_fmt(codec, c->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
                av_get_sample_fmt_name(c->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    c->sample_rate    = select_sample_rate(codec);
    c->channel_layout = select_channel_layout(codec);
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* packet for holding encoded output */
    pkt = av_packet_alloc();
    if (!pkt) {
        fprintf(stderr, "could not allocate the packet\n");
        exit(1);
    }

    /* frame containing input raw audio */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->nb_samples     = c->frame_size;
    frame->format         = c->sample_fmt;
    frame->channel_layout = c->channel_layout;

    /* allocate the data buffers */
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate audio data buffers\n");
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for (i = 0; i < 200; i++) {
        /* make sure the frame is writable -- makes a copy if the encoder
         * kept a reference internally */
        ret = av_frame_make_writable(frame);
        if (ret < 0)
            exit(1);
        samples = (uint16_t*)frame->data[0];

        for (j = 0; j < c->frame_size; j++) {
            samples[2 * j] = (int)(sin(t) * 10000);

            for (k = 1; k < c->channels; k++)
                samples[2 * j + k] = samples[2 * j];
            t += tincr;
        }
        encode(c, frame, pkt, f);
    }

    /* flush the encoder */
    encode(c, NULL, pkt, f);

    fclose(f);

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&c);

    return 0;
}
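
The listing above relies on helpers defined elsewhere in the same program: check_sample_fmt(), select_sample_rate(), select_channel_layout() and encode(). For reference, here is a minimal sketch of check_sample_fmt() and encode() in the spirit of FFmpeg's doc/examples/encode_audio.c; the exact bodies in the original source may differ.

/* Check that the encoder supports the given sample format. */
static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;

    while (*p != AV_SAMPLE_FMT_NONE) {
        if (*p == sample_fmt)
            return 1;
        p++;
    }
    return 0;
}

/* Send one frame to the encoder and drain all packets it produces. */
static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt, FILE *output)
{
    int ret;

    /* a NULL frame flushes the encoder */
    ret = avcodec_send_frame(ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending the frame to the encoder\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }

        fwrite(pkt->data, pkt->size, 1, output);
        av_packet_unref(pkt);
    }
}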
Example no. 11
static int startffmpeg(struct anim *anim)
{
    int i, videoStream;

    AVCodec *pCodec;
    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext *pCodecCtx;
    AVRational frame_rate;
    int frs_num;
    double frs_den;
    int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
    /* The following are used for color space determination */
    int srcRange, dstRange, brightness, contrast, saturation;
    int *table;
    const int *inv_table;
#endif

    if (anim == NULL) return(-1);

    streamcount = anim->streamindex;

    if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
        return -1;
    }

    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    av_dump_format(pFormatCtx, 0, anim->name, 0);


    /* Find the video stream */
    videoStream = -1;

    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (streamcount > 0) {
                streamcount--;
                continue;
            }
            videoStream = i;
            break;
        }

    if (videoStream == -1) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    /* Find the decoder for the video stream */
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx->workaround_bugs = 1;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    frame_rate = av_get_r_frame_rate_compat(pFormatCtx->streams[videoStream]);
    if (pFormatCtx->streams[videoStream]->nb_frames != 0) {
        anim->duration = pFormatCtx->streams[videoStream]->nb_frames;
    }
    else {
        anim->duration = ceil(pFormatCtx->duration *
                              av_q2d(frame_rate) /
                              AV_TIME_BASE);
    }

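    /* Store the frame rate as frs_sec / frs_sec_base: the denominator is
     * pre-scaled by AV_TIME_BASE, and both are reduced to keep the numbers small. */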
    frs_num = frame_rate.num;
    frs_den = frame_rate.den;

    frs_den *= AV_TIME_BASE;

    while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
        frs_num /= 10;
        frs_den /= 10;
    }

    anim->frs_sec = frs_num;
    anim->frs_sec_base = frs_den;

    anim->params = 0;

    anim->x = pCodecCtx->width;
    anim->y = av_get_cropped_height_from_codec(pCodecCtx);

    anim->pFormatCtx = pFormatCtx;
    anim->pCodecCtx = pCodecCtx;
    anim->pCodec = pCodec;
    anim->videoStream = videoStream;

    anim->interlacing = 0;
    anim->orientation = 0;
    anim->framesize = anim->x * anim->y * 4;

    anim->curposition = -1;
    anim->last_frame = 0;
    anim->last_pts = -1;
    anim->next_pts = -1;
    anim->next_packet.stream_index = -1;

    anim->pFrame = av_frame_alloc();
    anim->pFrameComplete = false;
    anim->pFrameDeinterlaced = av_frame_alloc();
    anim->pFrameRGB = av_frame_alloc();

    if (need_aligned_ffmpeg_buffer(anim)) {
        anim->pFrameRGB->format = AV_PIX_FMT_RGBA;
        anim->pFrameRGB->width  = anim->x;
        anim->pFrameRGB->height = anim->y;

        if (av_frame_get_buffer(anim->pFrameRGB, 32) < 0) {
            fprintf(stderr, "Could not allocate frame data.\n");
            avcodec_close(anim->pCodecCtx);
            avformat_close_input(&anim->pFormatCtx);
            av_frame_free(&anim->pFrameRGB);
            av_frame_free(&anim->pFrameDeinterlaced);
            av_frame_free(&anim->pFrame);
            anim->pCodecCtx = NULL;
            return -1;
        }
    }

    if (avpicture_get_size(AV_PIX_FMT_RGBA, anim->x, anim->y) !=
            anim->x * anim->y * 4)
    {
        fprintf(stderr,
                "ffmpeg has changed alloc scheme ... ARGHHH!\n");
        avcodec_close(anim->pCodecCtx);
        avformat_close_input(&anim->pFormatCtx);
        av_frame_free(&anim->pFrameRGB);
        av_frame_free(&anim->pFrameDeinterlaced);
        av_frame_free(&anim->pFrame);
        anim->pCodecCtx = NULL;
        return -1;
    }

    if (anim->ib_flags & IB_animdeinterlace) {
        avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
                       MEM_callocN(avpicture_get_size(
                                       anim->pCodecCtx->pix_fmt,
                                       anim->pCodecCtx->width,
                                       anim->pCodecCtx->height),
                                   "ffmpeg deinterlace"),
                       anim->pCodecCtx->pix_fmt,
                       anim->pCodecCtx->width,
                       anim->pCodecCtx->height);
    }

    if (pCodecCtx->has_b_frames) {
        anim->preseek = 25; /* FIXME: detect gopsize ... */
    }
    else {
        anim->preseek = 0;
    }

    anim->img_convert_ctx = sws_getContext(
                                anim->x,
                                anim->y,
                                anim->pCodecCtx->pix_fmt,
                                anim->x,
                                anim->y,
                                AV_PIX_FMT_RGBA,
                                SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
                                NULL, NULL, NULL);

    if (!anim->img_convert_ctx) {
        fprintf(stderr,
                "Can't transform color space??? Bailing out...\n");
        avcodec_close(anim->pCodecCtx);
        avformat_close_input(&anim->pFormatCtx);
        av_frame_free(&anim->pFrameRGB);
        av_frame_free(&anim->pFrameDeinterlaced);
        av_frame_free(&anim->pFrame);
        anim->pCodecCtx = NULL;
        return -1;
    }

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
    /* Try to detect if the input has 0-255 YCbCr range (JFIF JPEG, Motion JPEG) */
    if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
                                  &table, &dstRange, &brightness, &contrast, &saturation))
    {
        srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
        inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

        if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
                                     table, dstRange, brightness, contrast, saturation))
        {
            fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
        }
    }
    else {
        fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
    }
#endif

    return (0);
}
Example no. 12
static bool vaapi_init_codec(struct vaapi_encoder *enc, const char *path)
{
	int ret;

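	/* 1. Create the VAAPI device and hardware frame pool */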
	ret = av_hwdevice_ctx_create(&enc->vadevice_ref, AV_HWDEVICE_TYPE_VAAPI,
			path, NULL, 0);
	if (ret < 0) {
		warn("Failed to create VAAPI device context: %s",
				av_err2str(ret));
		return false;
	}

	enc->vaframes_ref = av_hwframe_ctx_alloc(enc->vadevice_ref);
	if (!enc->vaframes_ref) {
		warn("Failed to alloc HW frames context");
		return false;
	}

	AVHWFramesContext *frames_ctx =
			(AVHWFramesContext *)enc->vaframes_ref->data;
	frames_ctx->format            = AV_PIX_FMT_VAAPI;
	frames_ctx->sw_format         = AV_PIX_FMT_NV12;
	frames_ctx->width             = enc->context->width;
	frames_ctx->height            = enc->context->height;
	frames_ctx->initial_pool_size = 20;

	ret = av_hwframe_ctx_init(enc->vaframes_ref);
	if (ret < 0) {
		warn("Failed to init HW frames context: %s", av_err2str(ret));
		return false;
	}

	/* 2. Create software frame and picture */
	enc->vframe = av_frame_alloc();
	if (!enc->vframe) {
		warn("Failed to allocate video frame");
		return false;
	}

	enc->vframe->format = enc->context->pix_fmt;
	enc->vframe->width  = enc->context->width;
	enc->vframe->height = enc->context->height;
	enc->vframe->colorspace  = enc->context->colorspace;
	enc->vframe->color_range = enc->context->color_range;

	ret = av_frame_get_buffer(enc->vframe, base_get_alignment());
	if (ret < 0) {
		warn("Failed to allocate vframe: %s", av_err2str(ret));
		return false;
	}

	/* 3. set up codec */
	enc->context->pix_fmt       = AV_PIX_FMT_VAAPI;
	enc->context->hw_frames_ctx = av_buffer_ref(enc->vaframes_ref);

	ret = avcodec_open2(enc->context, enc->vaapi, NULL);
	if (ret < 0) {
		warn("Failed to open VAAPI codec: %s", av_err2str(ret));
		return false;
	}

	enc->initialized = true;
	return true;
}
Example no. 13
AVFrame *resample_frame(resample_t *resample, AVFrame *frame, const format_t *to_format) {
	const char *err;
	int errcode = 0;

	uint64_t to_channel_layout = av_get_default_channel_layout(to_format->channels);
	fix_frame_channel_layout(frame);

	if (frame->format != to_format->format)
		goto resample;
	if (frame->sample_rate != to_format->clockrate)
		goto resample;
	if (frame->channel_layout != to_channel_layout)
		goto resample;

	return av_frame_clone(frame);

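// At least one parameter differs from the target format; convert via libswresample.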
resample:

	if (G_UNLIKELY(!resample->swresample)) {
		resample->swresample = swr_alloc_set_opts(NULL,
				to_channel_layout,
				to_format->format,
				to_format->clockrate,
				frame->channel_layout,
				frame->format,
				frame->sample_rate,
				0, NULL);
		err = "failed to alloc resample context";
		if (!resample->swresample)
			goto err;

		err = "failed to init resample context";
		if ((errcode = swr_init(resample->swresample)) < 0)
			goto err;
	}

	// get a large enough buffer for resampled audio - this should be enough so we don't
	// have to loop
	int dst_samples = av_rescale_rnd(swr_get_delay(resample->swresample, to_format->clockrate)
			+ frame->nb_samples,
				to_format->clockrate, frame->sample_rate, AV_ROUND_UP);

	AVFrame *swr_frame = av_frame_alloc();

	err = "failed to alloc resampling frame";
	if (!swr_frame)
		goto err;
	av_frame_copy_props(swr_frame, frame);
	swr_frame->format = to_format->format;
	swr_frame->channel_layout = to_channel_layout;
	swr_frame->nb_samples = dst_samples;
	swr_frame->sample_rate = to_format->clockrate;
	err = "failed to get resample buffers";
	if ((errcode = av_frame_get_buffer(swr_frame, 0)) < 0)
		goto err;

	int ret_samples = swr_convert(resample->swresample, swr_frame->extended_data,
				dst_samples,
				(const uint8_t **) frame->extended_data,
				frame->nb_samples);
	err = "failed to resample audio";
	if ((errcode = ret_samples) < 0)
		goto err;

	swr_frame->nb_samples = ret_samples;
	swr_frame->pts = av_rescale(frame->pts, to_format->clockrate, frame->sample_rate);
	return swr_frame;

err:
	if (errcode)
		ilog(LOG_ERR, "Error resampling: %s (%s)", err, av_error(errcode));
	else
		ilog(LOG_ERR, "Error resampling: %s", err);
	resample_shutdown(resample);
	return NULL;
}
Example no. 14
int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = av_frame_copy_props(dst, src);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        if (src->nb_samples) {
            int ch = av_get_channel_layout_nb_channels(src->channel_layout);
            av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                            dst->nb_samples, ch, dst->format);
        } else {
            av_image_copy(dst->data, dst->linesize, src->data, src->linesize,
                          dst->format, dst->width, dst->height);
        }
        return 0;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf) && src->buf[i]; i++) {
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_mallocz(sizeof(*dst->extended_buf) *
                                       src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = av_get_channel_layout_nb_channels(src->channel_layout);

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }

        dst->extended_data = av_malloc(sizeof(*dst->extended_data) * ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}
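
For context, a typical caller pairs av_frame_ref() with av_frame_unref() or av_frame_free(). A minimal usage sketch (hypothetical caller; assumes src is a valid, filled AVFrame and ret is an int):

/* Share src's refcounted buffers with a second frame. */
AVFrame *dst = av_frame_alloc();
if (!dst)
    return AVERROR(ENOMEM);
ret = av_frame_ref(dst, src);
if (ret < 0) {
    av_frame_free(&dst);   /* nothing was referenced on failure */
    return ret;
}
/* ... use dst ... */
av_frame_free(&dst);       /* unrefs the buffers and frees the frame */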
Example no. 15
int frame_pusher_open(frame_pusher **o_fp, const char *path,
    int aud_samplerate, AVRational vid_framerate,
    int width, int height, int vid_bitrate)
{
    *o_fp = NULL;
    int ret;
    frame_pusher *fp = (frame_pusher *)av_malloc(sizeof(frame_pusher));

    // Guess the format
    AVOutputFormat *ofmt = av_guess_format(NULL, path, NULL);
    if (!ofmt) {
        ofmt = av_oformat_next(NULL);   // Use the first format available
        av_log(NULL, AV_LOG_WARNING, "Unsupported container format. Using %s instead.\n", ofmt->name);
        // TODO: Add the extension to the path.
    }
    av_log(NULL, AV_LOG_INFO, "Using format %s\n", ofmt->name);
    // Open output file
    AVIOContext *io_ctx;
    if ((ret = avio_open2(&io_ctx, path, AVIO_FLAG_WRITE, NULL, NULL)) < 0) return ret;
    // Create the format context
    fp->fmt_ctx = avformat_alloc_context();
    fp->fmt_ctx->oformat = ofmt;
    fp->fmt_ctx->pb = io_ctx;
    // > Create the streams. Here we simply create one for video and one for audio.
    // >> The audio stream
    AVCodec *aud_codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    fp->aud_stream = avformat_new_stream(fp->fmt_ctx, aud_codec);
    fp->aud_stream->id = 0;
    fp->aud_stream->codec->codec_id = AV_CODEC_ID_AAC;
    fp->aud_stream->codec->bit_rate = 64000;
    fp->aud_stream->codec->sample_rate = fp->aud_samplerate = aud_samplerate;
    // >>> http://stackoverflow.com/questions/22989838
    // >>> TODO: Add an option to set the codec and the sample format.
    fp->aud_stream->codec->sample_fmt = fp->aud_stream->codec->codec->sample_fmts[0];
    fp->aud_stream->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    fp->aud_stream->codec->channels = 2;
    fp->aud_stream->codec->time_base = fp->aud_stream->time_base = (AVRational){1, aud_samplerate};
    // >> The video stream
    AVCodec *vid_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    fp->vid_stream = avformat_new_stream(fp->fmt_ctx, vid_codec);
    fp->vid_width = fp->vid_stream->codec->width = width;
    fp->vid_height = fp->vid_stream->codec->height = height;
    fp->vid_stream->id = 1;
    // >>> * ATTENTION: fp->vid_stream->codec is an (AVCodecContext *) rather than (AVCodec *)!
    fp->vid_stream->codec->codec_id = AV_CODEC_ID_H264;
    fp->vid_stream->codec->bit_rate = vid_bitrate > 0 ? vid_bitrate : 1200000;
    fp->vid_stream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
    fp->vid_stream->codec->gop_size = 24;
    fp->vid_stream->codec->time_base = fp->vid_stream->time_base = (AVRational){vid_framerate.den, vid_framerate.num};
    // >> Enable experimental codecs such as AAC
    fp->aud_stream->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    fp->vid_stream->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    // >> Some formats want stream headers to be separate.
    // >> XXX: MPEG-4 doesn't have AVFMT_GLOBALHEADER in its format flags??
    //if (fp->fmt_ctx->flags & AVFMT_GLOBALHEADER)
        fp->aud_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        fp->vid_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if ((ret = avcodec_open2(fp->aud_stream->codec, aud_codec, NULL)) < 0) return ret;
    if ((ret = avcodec_open2(fp->vid_stream->codec, vid_codec, NULL)) < 0) return ret;
    // Trigger a full initialization on the format context and write the header.
    avformat_write_header(fp->fmt_ctx, NULL);

    // Miscellaneous initializations
    fp->first_packet = 1;
    fp->last_aud_pts = fp->last_vid_pts = 0;
    fp->nb_aud_buffered_samples = 0;
    // > Video
    fp->vid_frame = av_frame_alloc();
    fp->pict_bufsize = avpicture_get_size(AV_PIX_FMT_YUV420P, width, height);
    fp->pict_buf = (uint8_t *)av_malloc(fp->pict_bufsize);
    // >> Assign the video frame with the allocated buffer
    avpicture_fill((AVPicture *)fp->vid_frame, fp->pict_buf, AV_PIX_FMT_YUV420P, width, height);
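    // >> Color conversion context: incoming RGB24 frames are converted to YUV420P for the encoder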
    fp->sws_ctx = sws_getContext(
        width, height, AV_PIX_FMT_RGB24, width, height, AV_PIX_FMT_YUV420P,
        SWS_BILINEAR, NULL, NULL, NULL);
    // > Audio
    fp->aud_frame = av_frame_alloc();
    fp->aud_buf = av_frame_alloc();
    fp->aud_buf->format = fp->aud_frame->format = fp->aud_stream->codec->sample_fmt;
    fp->aud_buf->channel_layout = fp->aud_frame->channel_layout = fp->aud_stream->codec->channel_layout;
    fp->aud_buf->sample_rate = fp->aud_frame->sample_rate = fp->aud_stream->codec->sample_rate;
    if (aud_codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) {
        fp->nb_aud_samples_per_frame = 4096;
        av_log(NULL, AV_LOG_INFO, "frame_pusher: codec has variable frame size capability\n");
    } else fp->nb_aud_samples_per_frame = fp->aud_stream->codec->frame_size;
    fp->aud_buf->nb_samples = fp->aud_frame->nb_samples = fp->nb_aud_samples_per_frame;
    av_log(NULL, AV_LOG_INFO, "frame_pusher: number of samples per frame = %d\n", fp->nb_aud_samples_per_frame);
    if ((ret = av_frame_get_buffer(fp->aud_frame, 0)) < 0) return ret;
    if ((ret = av_frame_get_buffer(fp->aud_buf, 0)) < 0) return ret;
    // >> The audio resampling context
    fp->swr_ctx = swr_alloc();
    if (!fp->swr_ctx) {
        av_log(NULL, AV_LOG_ERROR, "frame_pusher: Cannot initialize audio resampling library"
            "(possibly caused by insufficient memory)\n");
        return AVERROR_UNKNOWN;
    }
    av_opt_set_channel_layout(fp->swr_ctx, "in_channel_layout", fp->aud_stream->codec->channel_layout, 0);
    av_opt_set_channel_layout(fp->swr_ctx, "out_channel_layout", fp->aud_stream->codec->channel_layout, 0);
    av_opt_set_int(fp->swr_ctx, "in_sample_rate", fp->aud_stream->codec->sample_rate, 0);
    av_opt_set_int(fp->swr_ctx, "out_sample_rate", fp->aud_stream->codec->sample_rate, 0);
    av_opt_set_sample_fmt(fp->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    av_opt_set_sample_fmt(fp->swr_ctx, "out_sample_fmt", fp->aud_stream->codec->sample_fmt, 0);
    if ((ret = swr_init(fp->swr_ctx)) < 0) return ret;

    *o_fp = fp;
    return 0;
}
Example no. 16
void Parser::Resample()
{
    int got_frame;

    int idxFrame;
    int idxBuff;

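    // Planar-float AAC input is treated as "extended" AAC and staged in a
    // fixed 2048-sample input frame before resampling.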
    bool isExtAAC = false;
    if (cdc_ctx_in->codec_id == AV_CODEC_ID_AAC)
        isExtAAC = sampleFormatIn == (int)AVSampleFormat::AV_SAMPLE_FMT_FLTP;

    try
    {
        try
        {
            if (nbSamplesIn > 0)
            {
                if (isExtAAC)
                {
                    frame_in->nb_samples = 2048;
                    frame_in->channels = cdc_ctx_in->channels;
                }
                else
                {
                    frame_in->nb_samples = nbSamplesIn;
                    frame_in->channels = nbChannelsIn;
                }
                frame_in->format = sampleFormatIn;
                frame_in->sample_rate = sampleRateIn;
                frame_in->channel_layout = channelLayoutIn;

                if (av_frame_get_buffer(frame_in, 1) < 0)
                    throw ResampleException() << errno_code(MIR_ERR_BUFFER_ALLOC_IN);

                if (isExtAAC)
                    frame_in->nb_samples = nbSamplesIn;
            }
            else
                throw ResampleException() << errno_code(MIR_ERR_BUFFER_ALLOC_NULL);
        }
        catch(SignalException& err)
        {
            throw ExceptionClass("parser", "Resample", "Segmentation error");
        }
        catch(ResampleException& err)
        {
            throw;
        }

/**
        try
        {
            frame_out->nb_samples = cdc_ctx_out->frame_size;
            frame_out->format = cdc_ctx_out->sample_fmt;
            frame_out->sample_rate = sampleRate;
            frame_out->channel_layout = av_get_default_channel_layout(nbChannel);

            if (av_frame_get_buffer(frame_out, 1) < 0)
                throw ResampleException() << errno_code(MIR_ERR_BUFFER_ALLOC_OUT);
        }
        catch(ResampleException& err)
        {
            throw;
        }
/**/

        for (idxFrame = 0; idxFrame < (int)this->bufFrames.size(); idxFrame++)
        {
            try
            {
                vetAux = bufFrames[idxFrame];

                //!< carregando dados no frame de entrada
                try
                {
                    nbBuffers = av_sample_fmt_is_planar((AVSampleFormat)frame_in->format) ? frame_in->channels : 1;
                    if ((int)vetAux.size() != nbBuffers)
                        throw FifoException() << errno_code(MIR_ERR_FIFO_DATA1);

                    for (idxBuff = 0; idxBuff < (int)vetAux.size(); idxBuff++)
                    {
                        if ((int)vetAux[idxBuff].size() > frame_in->linesize[0])
                            throw FifoException() << errno_code(MIR_ERR_FIFO_DATA2);

                        try
                        {
                            memcpy(frame_in->data[idxBuff], vetAux[idxBuff].data(), frame_in->linesize[0]);
                        }
                        catch(SignalException& err)
                        {
                            throw ExceptionClass("parser", "Resample", err.what());
                        }
                    }

                    for (int idxBuff = 0; idxBuff < (int)vetAux.size(); idxBuff++)
                        if (vetAux[idxBuff].size() > 0)
                            vetAux[idxBuff].clear();
                    if (vetAux.size() > 0)
                        vetAux.clear();
                }
                catch (ExceptionClass& err)
                {
                    throw;
                }
                catch (FifoException& err)
                {
                    throw;
                }
                catch (...)
                {
                    throw FifoException() << errno_code(MIR_ERR_FIFO_DATA3);
                }

                //!< convertendo dados
                if (swr_convert(swr_ctx, (uint8_t**)&frame_out->data, frame_out->nb_samples,
                                 (const uint8_t**)&frame_in->data, frame_in->nb_samples) >= 0)
                {
                    got_frame = 0;
                    av_init_packet(&pkt_out);
                    pkt_out.data = NULL;
                    pkt_out.size = 0;

                    if (avcodec_encode_audio2(cdc_ctx_out, &pkt_out, frame_out, &got_frame) >= 0)
                        EndResample();
                    else
                    {
                        av_free_packet(&pkt_out);
                        throw ResampleException() << errno_code(MIR_ERR_ENCODE);
                    }

                    av_free_packet(&pkt_out);
                }
                else
                    throw ResampleException() << errno_code(MIR_ERR_RESAMPLE);
            }
            catch(ExceptionClass& err)
            {
                objLog->mr_printf(MR_LOG_ERROR, idRadio, "%s\n", err.what());
            }
            catch(FifoException& err)
            {
                char aux[200];

                switch(*boost::get_error_info<errno_code>(err))
                {
                    case MIR_ERR_FIFO_DATA1:
                        sprintf(aux, "Error (%d) : vetAux.size = %d    nbBuffers = %d\n", MIR_ERR_FIFO_DATA1,
                                (int)vetAux.size(), nbBuffers);
                        break;
                    case MIR_ERR_FIFO_DATA2:
/**
                        sprintf(aux, "Error (%d) : vetAux[idxBuff].size = %d    linesize = %d\n", MIR_ERR_FIFO_DATA2,
                                (int)vetAux[idxBuff].size(), frame_in->linesize[0]);
/**/
                        sprintf(aux, "Error (%d) : vetAux[idxBuff].size = %d    linesize = %d    channels = %d"
                                "    channelsLayout = %d    format = %d    nb_samples = %d\n",
                                MIR_ERR_FIFO_DATA2,
                                (int)vetAux[idxBuff].size(),
                                frame_in->linesize[0],
                                frame_in->channels,
                                frame_in->channel_layout,
                                frame_in->format,
                                frame_in->nb_samples);
/**/
                        break;
                    case MIR_ERR_FIFO_DATA3:
                        sprintf(aux, "Error (%d) : General Frame allocation error\n", MIR_ERR_FIFO_DATA3);
                        break;
                    default :
                        break;
                }

                objLog->mr_printf(MR_LOG_ERROR, idRadio, aux);

                for (int idxBuff = 0; idxBuff < (int)vetAux.size(); idxBuff++)
                    if (vetAux[idxBuff].size() > 0)
                        vetAux[idxBuff].clear();
                if (vetAux.size() > 0)
                    vetAux.clear();
            }
            catch(ResampleException& err)
            {
                objLog->mr_printf(MR_LOG_ERROR, idRadio, "Frame Resample Exception : %d\n", *boost::get_error_info<errno_code>(err));
            }
        }

        av_frame_unref(frame_in);
//        av_frame_unref(frame_out);

        for (int idxFrame = 0; idxFrame < (int)this->bufFrames.size(); idxFrame++)
        {
            for (int idxBuff = 0; idxBuff < (int)bufFrames[idxFrame].size(); idxBuff++)
                if (bufFrames[idxFrame][idxBuff].size() > 0)
                    bufFrames[idxFrame][idxBuff].clear();
            if (bufFrames[idxFrame].size() > 0)
                bufFrames[idxFrame].clear();
        }
        if (bufFrames.size() > 0)
            bufFrames.clear();
    }
    catch (SignalException& err)
    {
        if (frame_in)
        {
            av_frame_free(&frame_in);
            frame_in = NULL;
        }

        if (frame_out)
        {
            av_frame_free(&frame_out);
            frame_out = NULL;
        }

        for (int idxBuff = 0; idxBuff < (int)vetAux.size(); idxBuff++)
            if (vetAux[idxBuff].size() > 0)
                vetAux[idxBuff].clear();
        if (vetAux.size() > 0)
            vetAux.clear();

        for (int idxFrame = 0; idxFrame < (int)this->bufFrames.size(); idxFrame++)
        {
            for (int idxBuff = 0; idxBuff < (int)bufFrames[idxFrame].size(); idxBuff++)
                if (bufFrames[idxFrame][idxBuff].size() > 0)
                    bufFrames[idxFrame][idxBuff].clear();
            if (bufFrames[idxFrame].size() > 0)
                bufFrames[idxFrame].clear();
        }
        if (bufFrames.size() > 0)
            bufFrames.clear();

        throw;
    }
    catch(...)
    {
        if (frame_in)
        {
            av_frame_free(&frame_in);
            frame_in = NULL;
        }

        if (frame_out)
        {
            av_frame_free(&frame_out);
            frame_out = NULL;
        }

        for (int idxBuff = 0; idxBuff < (int)vetAux.size(); idxBuff++)
            if (vetAux[idxBuff].size() > 0)
                vetAux[idxBuff].clear();
        if (vetAux.size() > 0)
            vetAux.clear();

        for (int idxFrame = 0; idxFrame < (int)this->bufFrames.size(); idxFrame++)
        {
            for (int idxBuff = 0; idxBuff < (int)bufFrames[idxFrame].size(); idxBuff++)
                if (bufFrames[idxFrame][idxBuff].size() > 0)
                    bufFrames[idxFrame][idxBuff].clear();
            if (bufFrames[idxFrame].size() > 0)
                bufFrames[idxFrame].clear();
        }
        if (bufFrames.size() > 0)
            bufFrames.clear();

        throw;
    }
}
Example no. 17
int main(int argc, char *argv[])
{
    int cameraFd;
    cameraFd = open("/dev/video0", O_RDWR|O_NONBLOCK);
    if (cameraFd < 0) {
        std::cout << "open /dev/video0 failed!" << std::endl;
        exit(EXIT_FAILURE);
    }

    int dumpFd;
    dumpFd = open("record.yuv", O_RDWR|O_TRUNC|O_CREAT, 0666);
    if (dumpFd < 0) {
        die();
    }

    struct v4l2_capability capability;
    int ret = ioctl(cameraFd, VIDIOC_QUERYCAP, &capability);
    if (ret < 0) {
        die();
    }

    if (!(capability.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
        std::cout << "not an video capture device!" << std::endl;
        exit(EXIT_FAILURE);
    }

    if (!(capability.capabilities & V4L2_CAP_STREAMING)) {
        std::cout << "does not support streaming i/o" << std::endl;
        exit(EXIT_FAILURE);
    }

    std::stringstream ss;
    ss << "Driver name: " << capability.driver;
    std::cout << ss.str() << std::endl;

    int index = 0;
    ret = ioctl(cameraFd, VIDIOC_G_INPUT, &index);
    if (ret < 0) {
        die();
    }
    std::cout << "device index: " << index << std::endl;

    struct v4l2_input input;
    memset(&input, 0, sizeof(input));
    input.index = index;
    ret = ioctl(cameraFd, VIDIOC_ENUMINPUT, &input);
    if (ret < 0) die();
    std::cout << "dump v4l2_input:" << std::endl;
    std::cout << "  index:" << input.index << std::endl;
    std::cout << "  name:" << input.name << std::endl;
    std::cout << "  type:" << input.type << std::endl;
    std::cout << "  std:" << input.std << std::endl;
    std::cout << "  status:" << input.status << std::endl;
    std::cout << "  capabilities:" << input.capabilities << std::endl;

    struct v4l2_format fmt;
    memset(&fmt, 0, sizeof(fmt));
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = 640;
    fmt.fmt.pix.height = 480;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
    fmt.fmt.pix.field = V4L2_FIELD_NONE;

    if (ioctl(cameraFd, VIDIOC_S_FMT, &fmt) < 0) {
        die();
    }

    memset(&fmt, 0, sizeof(fmt));
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(cameraFd, VIDIOC_G_FMT, &fmt) < 0) {
        die();
    }
    std::cout << "\nv4l2_format:" << std::endl;
    std::cout << "  width:" << fmt.fmt.pix.width << std::endl;
    std::cout << "  height:" << fmt.fmt.pix.height << std::endl;
    std::cout << "  pixel_format:" << print_fourcc(fmt.fmt.pix.pixelformat) << std::endl;
    std::cout << "  field:" << fmt.fmt.pix.field << std::endl;

    struct v4l2_requestbuffers req;
    memset(&req, 0, sizeof(req));
    req.count = 4;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    if (ioctl(cameraFd, VIDIOC_REQBUFS, &req) < 0) {
        if (EINVAL == errno) {
            std::cout << "does not surrpot memory mapping!\n" << std::endl;
            return -1;
        } else {
            die();
        }
    }

    VideoBuffer* buffers = (VideoBuffer*)calloc(req.count, sizeof(*buffers));
    struct v4l2_buffer buf;
    struct v4l2_buffer bufs[4];
    for (int numBufs = 0; numBufs < req.count; ++numBufs) {
        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = numBufs;
        if (ioctl(cameraFd, VIDIOC_QUERYBUF, &buf) == -1)
            return -1;
        bufs[numBufs] = buf;
        buffers[numBufs].length = buf.length;
        buffers[numBufs].start = mmap(NULL, buf.length,
                PROT_READ|PROT_WRITE, MAP_SHARED, cameraFd, buf.m.offset);
        if (buffers[numBufs].start == MAP_FAILED) {
            return -1;
        }
    }

    for (int i = 0; i < 4; ++i) {
        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        buf.m.offset = bufs[i].m.offset;

        if (ioctl(cameraFd, VIDIOC_QBUF, &buf) < 0) {
            die();
        }
    }

    int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(cameraFd, VIDIOC_STREAMON, &type) < 0) {
        die();
    }
    std::cout << "begin... " << std::endl;

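    // Set up a YUYV422 -> YUV420P conversion path for the captured frames.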
    struct SwsContext *swsContext = sws_getContext(640, 480,
            AV_PIX_FMT_YUYV422, 640, 480, AV_PIX_FMT_YUV420P,SWS_BILINEAR, NULL, NULL, NULL);
    AVFrame* pSrcFrame, *pDstFrame;
    pSrcFrame = av_frame_alloc();
    pSrcFrame->width = 640;
    pSrcFrame->height = 480;
    pSrcFrame->format = AV_PIX_FMT_YUYV422;
    av_frame_get_buffer(pSrcFrame, 0);
    std::cout << "linesize:" << pSrcFrame->linesize[0] << std::endl;

    pDstFrame = av_frame_alloc();
    pDstFrame->width = 640;
    pDstFrame->height = 480;
    pDstFrame->format = AV_PIX_FMT_YUV420P;
    av_frame_get_buffer(pDstFrame, 0);
    //pDstFrame->linesize[1] = 160;
    //pDstFrame->linesize[2] = 160;
    std::cout << "linesize:" << pDstFrame->linesize[0] << std::endl;
    std::cout << "linesize:" << pDstFrame->linesize[1] << std::endl;
    std::cout << "linesize:" << pDstFrame->linesize[2] << std::endl;

    fd_set fds;
    FD_ZERO(&fds);
    FD_SET(cameraFd, &fds);
    int maxFds = cameraFd+1;
    struct timeval tv;
    int frame_cnt = 100;
    while (frame_cnt) {
        tv.tv_sec = 1;
        tv.tv_usec = 0;
        ret = select(maxFds, &fds, nullptr, nullptr, &tv);
        if (ret < 0) {
            die();
        } else if (ret == 0) {
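            // select() timed out with no frame ready; poll again.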

        } else {
            if (FD_ISSET(cameraFd, &fds)) {
                struct v4l2_buffer capture_buf;
                memset(&capture_buf, 0, sizeof(capture_buf));
                capture_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                capture_buf.memory = V4L2_MEMORY_MMAP;
                if (ioctl(cameraFd, VIDIOC_DQBUF, &capture_buf) < 0) {
                    std::cout << "error VIDIOC_DQBUF" << std::endl;
                    return -1;
                }
                assert(capture_buf.index<4);

                pSrcFrame->data[0] = (uint8_t*)buffers[capture_buf.index].start;
                ret = sws_scale(swsContext, pSrcFrame->data, pSrcFrame->linesize, 0, 480, pDstFrame->data, pDstFrame->linesize);
                //std::cout << "sws_scale ret " << ret << std::endl;
                //std::cout << pDstFrame->linesize[0] << std::endl;
                //std::cout << pDstFrame->linesize[1] << std::endl;
                //std::cout << pDstFrame->linesize[2] << std::endl;

                write(dumpFd, buffers[capture_buf.index].start, capture_buf.bytesused);

                //write(dumpFd, pDstFrame->data[0], pDstFrame->linesize[0]*480);
                //write(dumpFd, pDstFrame->data[1], pDstFrame->linesize[1]*480);
                //write(dumpFd, pDstFrame->data[2], pDstFrame->linesize[2]*480);

                --frame_cnt;

                if (ioctl(cameraFd, VIDIOC_QBUF, &capture_buf) < 0) {
                    die();
                }
            }
        }
    }

    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(cameraFd, VIDIOC_STREAMOFF, &type) < 0) {
        die();
    }

    close(dumpFd);

    return 0;
}
Example no. 18
bool CFFmpegImage::DecodeFrame(AVFrame* frame, unsigned int width, unsigned int height, unsigned int pitch, unsigned char * const pixels)
{
  if (pixels == nullptr)
  {
    CLog::Log(LOGERROR, "%s - No valid buffer pointer (nullptr) passed", __FUNCTION__);
    return false;
  }

  AVFrame* pictureRGB = av_frame_alloc();
  if (!pictureRGB)
  {
    CLog::LogF(LOGERROR, "AVFrame could not be allocated");
    return false;
  }

  // we align on 16 as the input provided by the Texture also aligns the buffer size to 16
  int size = av_image_fill_arrays(pictureRGB->data, pictureRGB->linesize, NULL, AV_PIX_FMT_RGB32, width, height, 16);
  if (size < 0)
  {
    CLog::LogF(LOGERROR, "Could not allocate AVFrame member with %i x %i pixes", width, height);
    av_frame_free(&pictureRGB);
    return false;
  }

  bool needsCopy = false;
  int pixelsSize = pitch * height;
  bool aligned = (((uintptr_t)(const void *)(pixels)) % (32) == 0);
  if (!aligned)
    CLog::Log(LOGDEBUG, "Alignment of external buffer is not suitable for ffmpeg intrinsics - please fix your malloc");

  if (aligned && size == pixelsSize && (int)pitch == pictureRGB->linesize[0])
  {
    // We can use the pixels buffer directly
    pictureRGB->data[0] = pixels;
  }
  else
  {
    // We need an extra buffer and copy it manually afterwards
    pictureRGB->format = AV_PIX_FMT_RGB32;
    pictureRGB->width = width;
    pictureRGB->height = height;
    // we copy the data manually later, so align the buffer to give intrinsics (e.g. MMX, NEON) a chance
    if (av_frame_get_buffer(pictureRGB, 32) < 0)
    {
      CLog::LogF(LOGERROR, "Could not allocate temp buffer of size %i bytes", size);
      av_frame_free(&pictureRGB);
      return false;
    }
    needsCopy = true;
  }

  // Especially JPEG formats are full range, so we need to take care of that here.
  // Input formats like RGBA are handled correctly automatically.
  AVColorRange range = frame->color_range;
  AVPixelFormat pixFormat = ConvertFormats(frame);

  // assumption: square maximums, e.g. 2048x2048
  float ratio = m_width / (float)m_height;
  unsigned int nHeight = m_originalHeight;
  unsigned int nWidth = m_originalWidth;
  if (nHeight > height)
  {
    nHeight = height;
    nWidth = (unsigned int)(nHeight * ratio + 0.5f);
  }
  if (nWidth > width)
  {
    nWidth = width;
    nHeight = (unsigned int)(nWidth / ratio + 0.5f);
  }

  struct SwsContext* context = sws_getContext(m_originalWidth, m_originalHeight, pixFormat,
    nWidth, nHeight, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);

  if (range == AVCOL_RANGE_JPEG)
  {
    int* inv_table = nullptr;
    int* table = nullptr;
    int srcRange, dstRange, brightness, contrast, saturation;
    sws_getColorspaceDetails(context, &inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation);
    srcRange = 1;
    sws_setColorspaceDetails(context, inv_table, srcRange, table, dstRange, brightness, contrast, saturation);
  }

  sws_scale(context, frame->data, frame->linesize, 0, m_originalHeight,
    pictureRGB->data, pictureRGB->linesize);
  sws_freeContext(context);

  if (needsCopy)
  {
    int minPitch = std::min((int)pitch, pictureRGB->linesize[0]);
    if (minPitch < 0)
    {
      CLog::LogF(LOGERROR, "negative pitch or height");
      av_frame_free(&pictureRGB);
      return false;
    }
    const unsigned char *src = pictureRGB->data[0];
    unsigned char* dst = pixels;

    for (unsigned int y = 0; y < nHeight; y++)
    {
      memcpy(dst, src, minPitch);
      src += pictureRGB->linesize[0];
      dst += pitch;
    }
    av_frame_free(&pictureRGB);
  }
  else
  {
    // we only lent the data, so make sure it doesn't get freed
    pictureRGB->data[0] = nullptr;
    av_frame_free(&pictureRGB);
  }

  // update width and height; the original dimensions are kept
  m_height = nHeight;
  m_width = nWidth;

  return true;
}
Example no. 19
int udpsocket::ts_demux(void)
{
    AVCodec *pVideoCodec[VIDEO_NUM];
    AVCodec *pAudioCodec[AUDIO_NUM];
    AVCodecContext *pVideoCodecCtx[VIDEO_NUM];
    AVCodecContext *pAudioCodecCtx[AUDIO_NUM];
    AVIOContext * pb;
    AVInputFormat *piFmt;
    AVFormatContext *pFmt;
    uint8_t *buffer;
    int videoindex[VIDEO_NUM];
    int audioindex[AUDIO_NUM];
    AVStream *pVst[VIDEO_NUM];
    AVStream *pAst[AUDIO_NUM];
    AVFrame *pVideoframe[VIDEO_NUM];
    AVFrame *pAudioframe[AUDIO_NUM];
    AVFrame *pOutAudioframe[AUDIO_NUM];
    AVFrame *pOutAudioframelast[AUDIO_NUM];
    AVPacket pkt;
    int got_picture;
    int video_num[VIDEO_NUM];
    int audio_num[AUDIO_NUM];
    int frame_size;

    //transcodepool
    transcodepool*  pVideoTransPool[VIDEO_NUM];
    transcodepool*  pAudioTransPool[AUDIO_NUM];

    for( int i=0; i<VIDEO_NUM; i++ ){
        pVideoCodec[i] = NULL;
        pVideoCodecCtx[i] =NULL;
        videoindex[i] = -1;
        pVst[i] = NULL;
        video_num[i] = 0;
        pVideoframe[i] = NULL;
        pVideoframe[i] = av_frame_alloc();
        pVideoTransPool[i] = NULL;
    }
    for( int i=0; i<AUDIO_NUM; i++ ){
        pAudioCodec[i] = NULL;
        pAudioCodecCtx[i] = NULL;
        audioindex[i] = -1;
        pAst[i] = NULL;
        audio_num[i] = 0;
        pOutAudioframe[i] = NULL;
        pOutAudioframe[i] = av_frame_alloc();
        pOutAudioframelast[i] = NULL;
        pOutAudioframelast[i] = av_frame_alloc();
        pAudioframe[i] = NULL;
        pAudioframe[i] = av_frame_alloc();
        pAudioTransPool[i] = NULL;
    }
    pb = NULL;
    piFmt = NULL;
    pFmt = NULL;
    buffer = (uint8_t*)av_mallocz(sizeof(uint8_t)*BUFFER_SIZE);
    got_picture = 0;
    frame_size = AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2;

    //encoder
    AVFormatContext *ofmt_ctx = NULL;
    AVPacket enc_pkt;
    AVStream *out_stream;
    AVCodecContext *enc_ctx;
    AVCodec *encoder;

    AVFormatContext *outAudioFormatCtx[AUDIO_NUM];
    AVPacket audio_pkt;
    AVStream *audio_stream[AUDIO_NUM];
    AVCodecContext *AudioEncodeCtx[AUDIO_NUM];
    AVCodec *AudioEncoder[AUDIO_NUM];

    fp_v = fopen("OUT.h264","wb+"); // output file
    fp_a = fopen("audio_out.aac","wb+");

    //FFMPEG
    av_register_all();
    pb = avio_alloc_context(buffer, 4096, 0, NULL, read_data, NULL, NULL);
//    printf("thread %d pid %lu tid %lu\n",index,(unsigned long)getpid(),(unsigned long)pthread_self());
    if (!pb) {
        fprintf(stderr, "avio alloc failed!\n");
        return -1;
    }

    int x = av_probe_input_buffer(pb, &piFmt, "", NULL, 0, 0);
    if (x < 0) {
        printf("probe error: %d",x);
       // fprintf(stderr, "probe failed!\n");
    } else {
        fprintf(stdout, "probe success!\n");
        fprintf(stdout, "format: %s[%s]\n", piFmt->name, piFmt->long_name);
    }
    pFmt = avformat_alloc_context();
    pFmt->pb = pb;

    if (avformat_open_input(&pFmt, "", piFmt, NULL) < 0) {
        fprintf(stderr, "avformat open failed.\n");
        return -1;
    } else {
        fprintf(stdout, "open stream success!\n");
    }
    //pFmt->probesize = 4096 * 2000;
    //pFmt->max_analyze_duration = 5 * AV_TIME_BASE;
    //pFmt->probesize = 2048;
   // pFmt->max_analyze_duration = 1000;
    pFmt->probesize = 2048 * 1000 ;
    pFmt->max_analyze_duration = 2048 * 1000;
    if (avformat_find_stream_info(pFmt,0) < 0) {
        fprintf(stderr, "could not fine stream.\n");
        return -1;
    }
    printf("dump format\n");
    av_dump_format(pFmt, 0, "", 0);

    int videox = 0,audiox = 0;
    for (int i = 0; i < pFmt->nb_streams; i++) {
        if(videox == 7 && audiox == 7)
            break;
        if ( pFmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videox < 7 ) {
            videoindex[ videox++ ] = i;
        }
        if ( pFmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audiox < 7 ) {
            audioindex[ audiox++ ] = i;
        }
    }

    for(int i=0; i<VIDEO_NUM; i++)
        printf("videoindex %d = %d, audioindex %d = %d\n",i , videoindex[i], i ,audioindex[i]);

    if (videoindex[6] < 0 || audioindex[6] < 0) {
        fprintf(stderr, "videoindex=%d, audioindex=%d\n", videoindex[6], audioindex[6]);
        return -1;
    }

    for( int i=0; i<VIDEO_NUM; i++ ){
        pVst[i] = pFmt->streams[videoindex[i]];
        pVideoCodecCtx[i] = pVst[i]->codec;
        pVideoCodec[i] = avcodec_find_decoder(pVideoCodecCtx[i]->codec_id);
        if (!pVideoCodec[i]) {
            fprintf(stderr, "could not find video decoder!\n");
            return -1;
        }
        if (avcodec_open2(pVideoCodecCtx[i], pVideoCodec[i], NULL) < 0) {
            fprintf(stderr, "could not open video codec!\n");
            return -1;
        }
    }

    for( int i=0; i<AUDIO_NUM; i++ ){
        pAst[i] = pFmt->streams[audioindex[i]];
        pAudioCodecCtx[i] = pAst[i]->codec;
        pAudioCodec[i] = avcodec_find_decoder(pAudioCodecCtx[i]->codec_id);
        if (!pAudioCodec[i]) {
            fprintf(stderr, "could not find audio decoder!\n");
            return -1;
        }
        if (avcodec_open2(pAudioCodecCtx[i], pAudioCodec[i], NULL) < 0) {
            fprintf(stderr, "could not open audio codec!\n");
            return -1;
        }
    }

    //video encoder init
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "h264", NULL);
    unsigned char* outbuffer = NULL;
    outbuffer = (unsigned char*)av_malloc(1024*1000);
    AVIOContext *avio_out = NULL;
    avio_out = avio_alloc_context(outbuffer, 1024*1000, 0, NULL, NULL, write_buffer,NULL);
    if(avio_out == NULL){
        printf("avio_out error\n");
        return -1;
    }
    ofmt_ctx->pb = avio_out;
    ofmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO;
    out_stream = avformat_new_stream(ofmt_ctx, NULL);
    if(!out_stream){
        av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
        return -1;
    }
    enc_ctx = out_stream->codec;
    encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    enc_ctx->height = pVideoCodecCtx[0]->height;
    enc_ctx->width = pVideoCodecCtx[0]->width;
    enc_ctx->sample_aspect_ratio = pVideoCodecCtx[0]->sample_aspect_ratio;
    enc_ctx->pix_fmt = encoder->pix_fmts[0];
    out_stream->time_base = pVst[0]->time_base;
//    out_stream->time_base.num = 1;
//    out_stream->time_base.den = 25;
    enc_ctx->me_range = 16;
    enc_ctx->max_qdiff = 4;
    enc_ctx->qmin = 25;
    enc_ctx->qmax = 40;
    enc_ctx->qcompress = 0.6;
    enc_ctx->refs = 3;
    enc_ctx->bit_rate = 1000000;
    int re = avcodec_open2(enc_ctx, encoder, NULL);
    if (re < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream \n");
        return re;
    }

    if(ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    re = avformat_write_header(ofmt_ctx, NULL);
    if(re < 0){
        av_log(NULL, AV_LOG_ERROR, "Error occured when opening output file\n");
        return re;
    }

    //audio encoder
    for( int i=0; i<AUDIO_NUM; i++){
        outAudioFormatCtx[i] = NULL;
//        audio_pkt = NULL;
        audio_stream[i] = NULL;
        AudioEncodeCtx[i] = NULL;
        AudioEncoder[i] = NULL;
    }
    const char* out_audio_file = "transcodeaudio.aac";          //Output URL

    //Method 1.
    outAudioFormatCtx[0] = avformat_alloc_context();
    outAudioFormatCtx[0]->oformat = av_guess_format(NULL, out_audio_file, NULL);
    AVIOContext *avio_audio_out = NULL;
    avio_audio_out = avio_alloc_context(outbuffer, 1024*1000, 0, NULL, NULL, write_buffer,NULL);
    if(avio_audio_out == NULL){
        printf("avio_out error\n");
        return -1;
    }
    outAudioFormatCtx[0]->pb = avio_audio_out;
    //Method 2.
    //avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
    //fmt = pFormatCtx->oformat;

    //Open output URL
    if (avio_open(&outAudioFormatCtx[0]->pb,out_audio_file, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output file!\n");
        return -1;
    }

    //Show some information
    av_dump_format(outAudioFormatCtx[0], 0, out_audio_file, 1);

    AudioEncoder[0] = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!AudioEncoder[0]){
        printf("Can not find encoder!\n");
        return -1;
    }
    audio_stream[0] = avformat_new_stream(outAudioFormatCtx[0], AudioEncoder[0]);
    if (audio_stream[0]==NULL){
        return -1;
    }
    AudioEncodeCtx[0] = audio_stream[0]->codec;
    AudioEncodeCtx[0]->codec_id =  outAudioFormatCtx[0]->oformat->audio_codec;
    AudioEncodeCtx[0]->codec_type = AVMEDIA_TYPE_AUDIO;
    AudioEncodeCtx[0]->sample_fmt = AV_SAMPLE_FMT_S16;
    AudioEncodeCtx[0]->sample_rate= 48000;//44100
    AudioEncodeCtx[0]->channel_layout=AV_CH_LAYOUT_STEREO;
    AudioEncodeCtx[0]->channels = av_get_channel_layout_nb_channels(AudioEncodeCtx[0]->channel_layout);
    AudioEncodeCtx[0]->bit_rate = 64000;//64000
    /** Allow the use of the experimental AAC encoder */
    AudioEncodeCtx[0]->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

    /** Set the sample rate for the container. */
    audio_stream[0]->time_base.den = pAudioCodecCtx[0]->sample_rate;
    audio_stream[0]->time_base.num = 1;

    if (avcodec_open2(AudioEncodeCtx[0], AudioEncoder[0],NULL) < 0){
        printf("Failed to open encoder!\n");
        return -1;
    }

    av_samples_get_buffer_size(NULL, AudioEncodeCtx[0]->channels,AudioEncodeCtx[0]->frame_size,AudioEncodeCtx[0]->sample_fmt, 1);

    //uint8_t samples[AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2];
    av_init_packet(&pkt);
    av_init_packet(&audio_pkt);
    av_init_packet(&enc_pkt);
    AVAudioFifo *af = NULL;
    SwrContext *resample_context = NULL;
    long long pts = 0;
    /** Initialize the resampler to be able to convert audio sample formats. */
//    if (init_resampler(input_codec_context, output_codec_context,
//                       &resample_context))
    for(int i=0; i<1; i++){
        printf("work \n");
        printf(" samplerate input = %d , samplerate output = %d\n",pAudioCodecCtx[i]->sample_rate, AudioEncodeCtx[i]->sample_rate);
        resample_context = swr_alloc_set_opts(NULL, av_get_default_channel_layout(AudioEncodeCtx[i]->channels),
                                                          AudioEncodeCtx[i]->sample_fmt,
                                                          AudioEncodeCtx[i]->sample_rate,
                                                          av_get_default_channel_layout(pAudioCodecCtx[i]->channels),
                                                          pAudioCodecCtx[i]->sample_fmt,
                                                          pAudioCodecCtx[i]->sample_rate,
                                                          0, NULL);
        swr_init(resample_context);
    }
    af = av_audio_fifo_alloc(AudioEncodeCtx[0]->sample_fmt, AudioEncodeCtx[0]->channels, 1);
    if(af == NULL)
    {
        printf("error af \n");
        return -1;
    }

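    /* Main loop: demux TS packets, decode them, and re-encode video to H.264
     * and audio to AAC. */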
    while(1) {
        if (av_read_frame(pFmt, &pkt) >= 0) {
            for( int i=0; i<1; i++ ){
                if (pkt.stream_index == videoindex[i]) {
//                    av_frame_free(&pframe);
                    avcodec_decode_video2(pVideoCodecCtx[i], pVideoframe[i], &got_picture, &pkt);
                    if (got_picture) {
                        if(videoindex[i] == 0){
//                            m_tsRecvPool->write_buffer(pkt.data, pkt.size);
                            pVideoframe[i]->pts = av_frame_get_best_effort_timestamp(pVideoframe[i]);
                            pVideoframe[i]->pict_type = AV_PICTURE_TYPE_NONE;
//                            printf("videoframesize0 = %d, size1 = %d, size2 = %d, size3 = %d, size4 = %d,format = %d\n",pVideoframe[i]->linesize[0],
//                                    pVideoframe[i]->linesize[1],pVideoframe[i]->linesize[2],pVideoframe[i]->linesize[3],pVideoframe[i]->linesize[4],pVideoframe[i]->format);
//                            pVideoTransPool[i]->PutFrame( pVideoframe[i] ,i);
                            int enc_got_frame = 0;
                            /*  ffmpeg encoder */
                            enc_pkt.data = NULL;
                            enc_pkt.size = 0;
                            av_init_packet(&enc_pkt);
                            re = avcodec_encode_video2(ofmt_ctx->streams[videoindex[i]]->codec, &enc_pkt,
                                    pVideoframe[i], &enc_got_frame);
//                            printf("enc_got_frame =%d, re = %d \n",enc_got_frame, re);
                            printf("video Encode 1 Packet\tsize:%d\tpts:%lld\n",enc_pkt.size,enc_pkt.pts);
                            /* prepare packet for muxing */
//                            fwrite(enc_pkt.data,enc_pkt.size, 1, fp_v);
                        }
//                        printf(" video %d decode %d num\n", i, video_num[i]++);
                        break;
                    }

                } else if (pkt.stream_index == audioindex[i]) {
                    if (avcodec_decode_audio4(pAudioCodecCtx[i], pAudioframe[i], &frame_size, &pkt) >= 0) {
                        if (i == 0) {
                            /* convert the decoded samples into the encoder's sample format;
                             * planar formats need one data pointer per channel */
                            uint8_t **converted_input_samples =
                                    calloc(AudioEncodeCtx[0]->channels, sizeof(*converted_input_samples));
                            int error = 0;
                            if (!converted_input_samples)
                                break;
                            if ((error = av_samples_alloc(converted_input_samples, NULL,
                                                          AudioEncodeCtx[0]->channels,
                                                          pAudioframe[i]->nb_samples,
                                                          AudioEncodeCtx[0]->sample_fmt, 0)) < 0 ||
                                (error = swr_convert(resample_context, converted_input_samples,
                                                     pAudioframe[i]->nb_samples,
                                                     (const uint8_t **)pAudioframe[i]->extended_data,
                                                     pAudioframe[i]->nb_samples)) < 0) {
                                printf("error: %d\n", error);
                            } else {
                                /* buffer converted samples until a full encoder frame is available */
                                av_audio_fifo_write(af, (void **)converted_input_samples,
                                                    pAudioframe[i]->nb_samples);
                            }
                            av_freep(&converted_input_samples[0]);
                            free(converted_input_samples);

                            /* drain complete frames from the FIFO and encode them */
                            int got_frame = 0;
                            while (av_audio_fifo_size(af) >= AudioEncodeCtx[0]->frame_size) {
                                int out_samples = FFMIN(av_audio_fifo_size(af), AudioEncodeCtx[0]->frame_size);
                                pOutAudioframe[i]->nb_samples     = out_samples;
                                pOutAudioframe[i]->channel_layout = AudioEncodeCtx[0]->channel_layout;
                                pOutAudioframe[i]->sample_rate    = AudioEncodeCtx[0]->sample_rate;
                                pOutAudioframe[i]->format         = AudioEncodeCtx[0]->sample_fmt;

                                av_frame_get_buffer(pOutAudioframe[i], 0);
                                av_audio_fifo_read(af, (void **)pOutAudioframe[i]->data, out_samples);

                                pOutAudioframe[i]->pts = pts;
                                pts += pOutAudioframe[i]->nb_samples;

                                audio_pkt.data = NULL;
                                audio_pkt.size = 0;
                                av_init_packet(&audio_pkt);
                                avcodec_encode_audio2(AudioEncodeCtx[0], &audio_pkt, pOutAudioframe[i], &got_frame);
                                if (got_frame) {
                                    printf("audio: encoded 1 packet\tsize:%d\tpts:%lld\n",
                                           audio_pkt.size, (long long)audio_pkt.pts);
                                    fwrite(audio_pkt.data, audio_pkt.size, 1, fp_a);
                                }
                            }
                        }
                        break;
                    }
                }
            }
            av_free_packet(&pkt);
            av_free_packet(&enc_pkt);
        }
    }

    av_free(buffer);
    for (int i = 0; i < VIDEO_NUM; i++)
        av_frame_free(&pVideoframe[i]);

    for (int i = 0; i < AUDIO_NUM; i++)
        av_frame_free(&pAudioframe[i]);

    return 0;

}
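The audio path above is the usual libswresample + AVAudioFifo pattern: convert each decoded frame to the encoder's sample format, buffer the samples, then drain exactly frame_size samples per encoder call. The FIFO is needed because the decoder's frame size rarely matches the encoder's. Below is a minimal self-contained sketch of that pattern (my own, not code from the project above; buffer_and_encode, enc_ctx, swr_ctx, fifo and next_pts are hypothetical names), using the same era's avcodec_encode_audio2() API:

#include <stdlib.h>
#include <libavcodec/avcodec.h>
#include <libavutil/audio_fifo.h>
#include <libswresample/swresample.h>

static int buffer_and_encode(AVCodecContext *enc_ctx, SwrContext *swr_ctx,
                             AVAudioFifo *fifo, AVFrame *in, int64_t *next_pts)
{
    /* one data pointer per channel, as planar formats require */
    uint8_t **converted = calloc(enc_ctx->channels, sizeof(*converted));
    int ret;

    if (!converted)
        return AVERROR(ENOMEM);
    ret = av_samples_alloc(converted, NULL, enc_ctx->channels,
                           in->nb_samples, enc_ctx->sample_fmt, 0);
    if (ret < 0)
        goto end;

    /* resample into the encoder's format; returns samples per channel */
    ret = swr_convert(swr_ctx, converted, in->nb_samples,
                      (const uint8_t **)in->extended_data, in->nb_samples);
    if (ret < 0)
        goto end;
    av_audio_fifo_write(fifo, (void **)converted, ret);

    /* drain full encoder frames from the FIFO */
    while (av_audio_fifo_size(fifo) >= enc_ctx->frame_size) {
        AVPacket pkt;
        AVFrame *out = av_frame_alloc();
        int got_packet = 0;

        if (!out) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        out->nb_samples     = enc_ctx->frame_size;
        out->channel_layout = enc_ctx->channel_layout;
        out->sample_rate    = enc_ctx->sample_rate;
        out->format         = enc_ctx->sample_fmt;
        if ((ret = av_frame_get_buffer(out, 0)) < 0) {
            av_frame_free(&out);
            goto end;
        }
        av_audio_fifo_read(fifo, (void **)out->data, out->nb_samples);

        /* audio pts simply advances by the number of samples encoded */
        out->pts   = *next_pts;
        *next_pts += out->nb_samples;

        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;
        ret = avcodec_encode_audio2(enc_ctx, &pkt, out, &got_packet);
        av_frame_free(&out);
        if (ret < 0)
            goto end;
        if (got_packet)
            av_free_packet(&pkt); /* a real program would mux the packet here */
    }
    ret = 0;
end:
    if (converted)
        av_freep(&converted[0]);
    free(converted);
    return ret;
}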
Example no. 20
0
static int decklink_write_video_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
    AVPicture *avpicture = (AVPicture *) pkt->data;
    AVFrame *avframe, *tmp;
    decklink_frame *frame;
    buffercount_type buffered;
    HRESULT hr;

    /* HACK while av_uncoded_frame() isn't implemented */
    int ret;

    tmp = av_frame_alloc();
    if (!tmp)
        return AVERROR(ENOMEM);
    tmp->format = AV_PIX_FMT_UYVY422;
    tmp->width  = ctx->bmd_width;
    tmp->height = ctx->bmd_height;
    ret = av_frame_get_buffer(tmp, 32);
    if (ret < 0) {
        av_frame_free(&tmp);
        return ret;
    }
    av_image_copy(tmp->data, tmp->linesize, (const uint8_t **) avpicture->data,
                  avpicture->linesize, (AVPixelFormat) tmp->format, tmp->width,
                  tmp->height);
    avframe = av_frame_clone(tmp);
    av_frame_free(&tmp);
    if (!avframe) {
        av_log(avctx, AV_LOG_ERROR, "Could not clone video frame.\n");
        return AVERROR(EIO);
    }
    /* end HACK */

    frame = new decklink_frame(ctx, avframe, ctx->bmd_width, ctx->bmd_height,
                               (void *) avframe->data[0]);
    if (!frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not create new frame.\n");
        return AVERROR(EIO);
    }

    /* Always keep at most one second of frames buffered. */
    sem_wait(&ctx->semaphore);

    /* Schedule frame for playback. */
    hr = ctx->dlo->ScheduleVideoFrame((struct IDeckLinkVideoFrame *) frame,
                                      pkt->pts * ctx->bmd_tb_num,
                                      ctx->bmd_tb_num, ctx->bmd_tb_den);
    if (hr != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not schedule video frame."
                " error %08x.\n", (uint32_t) hr);
        frame->Release();
        return AVERROR(EIO);
    }

    ctx->dlo->GetBufferedVideoFrameCount(&buffered);
    av_log(avctx, AV_LOG_DEBUG, "Buffered video frames: %d.\n", (int) buffered);
    if (pkt->pts > 2 && buffered <= 2)
        av_log(avctx, AV_LOG_WARNING, "There are not enough buffered video frames."
               " Video may misbehave!\n");

    /* Preroll video frames. */
    if (!ctx->playback_started && pkt->pts > ctx->frames_preroll) {
        av_log(avctx, AV_LOG_DEBUG, "Ending audio preroll.\n");
        if (ctx->audio && ctx->dlo->EndAudioPreroll() != S_OK) {
            av_log(avctx, AV_LOG_ERROR, "Could not end audio preroll!\n");
            return AVERROR(EIO);
        }
        av_log(avctx, AV_LOG_DEBUG, "Starting scheduled playback.\n");
        if (ctx->dlo->StartScheduledPlayback(0, ctx->bmd_tb_den, 1.0) != S_OK) {
            av_log(avctx, AV_LOG_ERROR, "Could not start scheduled playback!\n");
            return AVERROR(EIO);
        }
        ctx->playback_started = 1;
    }

    return 0;
}
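The sem_wait() above is the back-pressure half of a producer/consumer pair: the matching sem_post() is expected to happen when the DeckLink device finishes with a frame, so at most the preconfigured number of frames is ever in flight. A minimal sketch of that pairing, stripped of the DeckLink machinery (queue_depth, submit_frame and on_frame_done are hypothetical stand-ins, and submit_frame is stubbed so the sketch compiles):

#include <semaphore.h>
#include <stdio.h>

static sem_t frame_slots;

/* stub standing in for handing a frame to the output device */
static void submit_frame(void *frame)
{
    printf("frame %p submitted\n", frame);
}

static void writer_init(unsigned queue_depth)
{
    /* allow at most queue_depth frames in flight, e.g. one second's worth */
    sem_init(&frame_slots, 0, queue_depth);
}

static void write_frame(void *frame)
{
    sem_wait(&frame_slots); /* blocks while the device holds queue_depth frames */
    submit_frame(frame);
}

/* the device's completion callback returns a slot to the producer */
static void on_frame_done(void *frame)
{
    (void)frame;
    sem_post(&frame_slots);
}

Using a counting semaphore instead of an explicit queue keeps the producer simple: the semaphore's value is the number of free buffer slots, so no separate bookkeeping is needed.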
Example no. 21
0
int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channels       = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = av_frame_copy_props(dst, src);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        ret = av_frame_copy(dst, src);
        if (ret < 0)
            av_frame_unref(dst);

        return ret;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
                                             src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        CHECK_CHANNELS_CONSISTENCY(src);

        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}
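A short usage sketch of the caller's side of this contract (mine, not from libavutil): av_frame_ref() either shares src's refcounted buffers or, as the branch above shows, deep-copies unrefcounted data; either way the reference is dropped with av_frame_unref() or av_frame_free().

#include <libavutil/error.h>
#include <libavutil/frame.h>

int read_through_ref(const AVFrame *src)
{
    /* dst must be freshly allocated or unref'd before av_frame_ref() */
    AVFrame *dst = av_frame_alloc();
    int ret;

    if (!dst)
        return AVERROR(ENOMEM);
    ret = av_frame_ref(dst, src); /* share (or copy) src's buffers */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }
    /* ... read dst ... */
    av_frame_free(&dst); /* unrefs the buffers and frees the shell */
    return 0;
}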
Example no. 22
0
static void *sender_thread(void *arg)
{
    int i, ret = 0;
    struct sender_data *wd = arg;

    av_log(NULL, AV_LOG_INFO, "sender #%d: workload=%d\n", wd->id, wd->workload);
    for (i = 0; i < wd->workload; i++) {
        if (rand() % wd->workload < wd->workload / 10) {
            av_log(NULL, AV_LOG_INFO, "sender #%d: flushing the queue\n", wd->id);
            av_thread_message_flush(wd->queue);
        } else {
            char *val;
            AVDictionary *meta = NULL;
            struct message msg = {
                .magic = MAGIC,
                .frame = av_frame_alloc(),
            };

            if (!msg.frame) {
                ret = AVERROR(ENOMEM);
                break;
            }

            /* we add some metadata to identify the frames */
            val = av_asprintf("frame %d/%d from sender %d",
                              i + 1, wd->workload, wd->id);
            if (!val) {
                av_frame_free(&msg.frame);
                ret = AVERROR(ENOMEM);
                break;
            }
            ret = av_dict_set(&meta, "sig", val, AV_DICT_DONT_STRDUP_VAL);
            if (ret < 0) {
                av_frame_free(&msg.frame);
                break;
            }
            msg.frame->metadata = meta;

            /* allocate a real frame in order to simulate "real" work */
            msg.frame->format = AV_PIX_FMT_RGBA;
            msg.frame->width  = 320;
            msg.frame->height = 240;
            ret = av_frame_get_buffer(msg.frame, 32);
            if (ret < 0) {
                av_frame_free(&msg.frame);
                break;
            }

            /* push the frame in the common queue */
            av_log(NULL, AV_LOG_INFO, "sender #%d: sending my work (%d/%d frame:%p)\n",
                   wd->id, i + 1, wd->workload, msg.frame);
            ret = av_thread_message_queue_send(wd->queue, &msg, 0);
            if (ret < 0) {
                av_frame_free(&msg.frame);
                break;
            }
        }
    }
    av_log(NULL, AV_LOG_INFO, "sender #%d: my work is done here (%s)\n",
           wd->id, av_err2str(ret));
    av_thread_message_queue_set_err_recv(wd->queue, ret < 0 ? ret : AVERROR_EOF);
    return NULL;
}
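A sender like this is only half of the test; a possible receiving counterpart (a sketch, not the original test's code, assuming the struct message and MAGIC definitions used above) drains the queue until the error set by av_thread_message_queue_set_err_recv() surfaces as AVERROR_EOF:

#include <libavutil/dict.h>
#include <libavutil/frame.h>
#include <libavutil/log.h>
#include <libavutil/threadmessage.h>

static void *receiver_thread(void *arg)
{
    AVThreadMessageQueue *queue = arg;
    struct message msg;
    int ret;

    for (;;) {
        ret = av_thread_message_queue_recv(queue, &msg, 0);
        if (ret < 0) /* AVERROR_EOF once every sender has finished */
            break;
        if (msg.magic == MAGIC) {
            AVDictionaryEntry *e = av_dict_get(msg.frame->metadata, "sig", NULL, 0);
            av_log(NULL, AV_LOG_INFO, "received frame %p (%s)\n",
                   msg.frame, e ? e->value : "no signature");
        }
        av_frame_free(&msg.frame); /* the receiver owns the frame once recv'd */
    }
    return NULL;
}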