Example #1
//Function to open an output file and write codecs, header and stream information
static int open_output_file(const char *filename) {

    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;

    int ret;
    unsigned int i;

    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_DEBUG, "Could not deduce output format from file extension: falling back to RTSP\n");
        avformat_alloc_output_context2(&ofmt_ctx, NULL, "rtsp", filename);
    }
    if (!ofmt_ctx)
        return AVERROR_UNKNOWN;

    
    for (i = 0; i < ifmt_ctx->nb_streams + 1; i++) {        //NOTE: one extra iteration (+1) adds a greyscale stream; change to the real stream count as needed
        
        //Allocating output stream for each input stream   
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        //hack for now... TODO: make a standard configuration
        if (i < ifmt_ctx->nb_streams) {
            in_stream = ifmt_ctx->streams[i];
        } else {
            in_stream = ifmt_ctx->streams[0];
        }
        
        dec_ctx = in_stream->codec;
        enc_ctx = out_stream->codec;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        
            //Encoding to same codec as input --Change as needed--
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }
            
            //Encode with the same video properties as the input --Change as needed--
            enc_ctx->height = dec_ctx->height;
            enc_ctx->width = dec_ctx->width;
            enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
            enc_ctx->pix_fmt = encoder->pix_fmts[0];
            enc_ctx->time_base = dec_ctx->time_base;

            //additional information to create an h264 encoder
            enc_ctx->bit_rate = dec_ctx->bit_rate;
            enc_ctx->gop_size = 10;
            enc_ctx->qmin = 10;
            enc_ctx->qmax = 51;

            //"preset" is a libx264 private option, so it must be set on priv_data
            if (dec_ctx->codec_id == AV_CODEC_ID_H264) {
                av_opt_set(enc_ctx->priv_data, "preset", "slow", 0);
            }
            
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
            }

        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%u is of unknown type\n", i);
            return AVERROR_INVALIDDATA;

        } else {

            //Remux remaining streams
            ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec, in_stream->codec);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
                return ret;
            }
        }

        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    //Show details of output format
    av_dump_format(ofmt_ctx, 0, filename, 1);

    //Open output file, if needed
    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'\n", filename);
            return ret;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Unable to write stream header to output file\n");
        return ret;
    }

    return 0;
}
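The example above relies on the long-deprecated AVStream.codec contexts. A minimal sketch of the remux branch on the post-3.1 API, copying AVCodecParameters instead of whole codec contexts (the function name is hypothetical):

#include <libavformat/avformat.h>

/* copy one input stream's parameters to a new output stream (remux case) */
static int add_remux_stream(AVFormatContext *ofmt_ctx, const AVStream *in_stream)
{
    AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
    if (!out_stream)
        return AVERROR(ENOMEM);

    int ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
    if (ret < 0)
        return ret;

    out_stream->codecpar->codec_tag = 0; /* let the muxer choose a tag */
    out_stream->time_base = in_stream->time_base;
    return 0;
}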
Example #2
static bool
app_parse_options(App *app, int argc, char *argv[])
{
    char errbuf[BUFSIZ];
    int ret, v, o = -1;

    enum {
        OPT_LIST_FORMATS = 1000,
    };

    static const struct option long_options[] = {
        { "help",           no_argument,        NULL, 'h'                   },
        { "window-width",   required_argument,  NULL, 'x'                   },
        { "window-height",  required_argument,  NULL, 'y'                   },
        { "renderer",       required_argument,  NULL, 'r'                   },
        { "mem-type",       required_argument,  NULL, 'm'                   },
        { "format",         required_argument,  NULL, 'f'                   },
        { "list-formats",   no_argument,        NULL, OPT_LIST_FORMATS      },
        { NULL, }
    };

    for (;;) {
        o = -1; /* getopt_long() leaves the index untouched for short options */
        v = getopt_long(argc, argv, "-hx:y:r:m:f:", long_options, &o);
        if (v < 0)
            break;

        switch (v) {
        case '?':
            return false;
        case 'h':
            print_help(argv[0]);
            return false;
        case 'x':
            ret = av_opt_set(app, "window_width", optarg, 0);
            break;
        case 'y':
            ret = av_opt_set(app, "window_height", optarg, 0);
            break;
        case 'r':
            ret = av_opt_set(app, "renderer", optarg, 0);
            break;
        case 'm':
            ret = av_opt_set(app, "mem_type", optarg, 0);
            break;
        case 'f':
            ret = av_opt_set(app, "pix_fmt", optarg, 0);
            break;
        case OPT_LIST_FORMATS:
            ret = av_opt_set_int(app, "list_pix_fmts", 1, 0);
            break;
        case '\1':
            ret = av_opt_set(app, "filename", optarg, 0);
            break;
        default:
            ret = 0;
            break;
        }
        if (ret != 0)
            goto error_set_option;
    }
    return true;

    /* ERRORS */
error_set_option:
    if (o < 0) {
        av_log(app, AV_LOG_ERROR, "failed to set short option -%c: %s\n",
            v, ffmpeg_strerror(ret, errbuf));
    }
    else {
        av_log(app, AV_LOG_ERROR, "failed to set long option --%s: %s\n",
            long_options[o].name, ffmpeg_strerror(ret, errbuf));
    }
    return false;
}
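For av_opt_set(app, ...) to work, App must be an AVOptions-enabled struct. A minimal sketch of what that requires; the option table below only covers a few of the names used above and is illustrative, not the original project's definition:

#include <stddef.h>
#include <limits.h>
#include <libavutil/opt.h>

typedef struct App {
    const AVClass *klass;   /* must be the first member */
    int   window_width;
    int   window_height;
    char *filename;
} App;

#define OFFSET(x) offsetof(App, x)
static const AVOption app_options[] = {
    { "window_width",  "window width in pixels",  OFFSET(window_width),
      AV_OPT_TYPE_INT,    { .i64 = 640 }, 0, INT_MAX, 0 },
    { "window_height", "window height in pixels", OFFSET(window_height),
      AV_OPT_TYPE_INT,    { .i64 = 480 }, 0, INT_MAX, 0 },
    { "filename",      "input file name",         OFFSET(filename),
      AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, 0 },
    { NULL }
};

static const AVClass app_class = {
    .class_name = "App",
    .item_name  = av_default_item_name,
    .option     = app_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

An App instance would then set app->klass = &app_class and call av_opt_set_defaults(app) before the parsing loop runs.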
Example #3
static int process_options(AVFilterContext *ctx, AVDictionary **options,
                           const char *args)
{
    const AVOption *o = NULL;
    int ret, count = 0;
    char *av_uninit(parsed_key), *av_uninit(value);
    const char *key;
    int offset= -1;

    if (!args)
        return 0;

    while (*args) {
        const char *shorthand = NULL;

        o = av_opt_next(ctx->priv, o);
        if (o) {
            if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
                continue;
            offset = o->offset;
            shorthand = o->name;
        }

        ret = av_opt_get_key_value(&args, "=", ":",
                                   shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
                                   &parsed_key, &value);
        if (ret < 0) {
            if (ret == AVERROR(EINVAL))
                av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
            else
                av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
                       av_err2str(ret));
            return ret;
        }
        if (*args)
            args++;
        if (parsed_key) {
            key = parsed_key;
            while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
        } else {
            key = shorthand;
        }

        av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);

        if (av_opt_find(ctx, key, NULL, 0, 0)) {
            ret = av_opt_set(ctx, key, value, 0);
            if (ret < 0) {
                av_free(value);
                av_free(parsed_key);
                return ret;
            }
        } else {
            av_dict_set(options, key, value, 0);
            if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
                if (!av_opt_find(ctx->priv, key, NULL, 0,
                                 AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
                    if (ret == AVERROR_OPTION_NOT_FOUND)
                        av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
                    av_free(value);
                    av_free(parsed_key);
                    return ret;
                }
            }
        }

        av_free(value);
        av_free(parsed_key);
        count++;
    }

    if (ctx->enable_str) {
        ret = set_enable_expr(ctx, ctx->enable_str);
        if (ret < 0)
            return ret;
    }
    return count;
}
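A small standalone sketch of the key=value parsing this function performs, using hypothetical option names:

#include <stdio.h>
#include <libavutil/opt.h>
#include <libavutil/mem.h>

int main(void)
{
    const char *args = "width=640:height=480";
    char *key, *val;

    while (*args) {
        if (av_opt_get_key_value(&args, "=", ":", 0, &key, &val) < 0)
            break;
        if (*args)
            args++;    /* skip the ':' pair separator, as above */
        printf("%s -> %s\n", key, val);
        av_free(key);
        av_free(val);
    }
    return 0;
}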
Example #4
/*
 * Video encoding example
 */
static void video_encode_example(const char *filename, int codec_id)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *frame;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Encode video file %s\n", filename);

    /* find the video encoder for the requested codec id */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base= (AVRational){1,25};
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames=1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if(codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }

    /* encode 1 second of video */
    for(i=0;i<25;i++) {
        av_init_packet(&pkt);
        pkt.data = NULL;    // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for(y=0;y<c->height;y++) {
            for(x=0;x<c->width;x++) {
                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for(y=0;y<c->height/2;y++) {
            for(x=0;x<c->width/2;x++) {
                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        frame->pts = i;

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    avcodec_free_frame(&frame);
    printf("\n");
}
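avcodec_encode_video2() and av_free_packet() above are deprecated; a sketch of the same write loop on the send/receive API (FFmpeg 3.1+):

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* send one frame (or NULL to flush) and write every packet produced */
static int encode_and_write(AVCodecContext *c, AVFrame *frame, FILE *f)
{
    int ret = avcodec_send_frame(c, frame);
    if (ret < 0)
        return ret;

    AVPacket *pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    while ((ret = avcodec_receive_packet(c, pkt)) >= 0) {
        fwrite(pkt->data, 1, pkt->size, f);
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);

    /* EAGAIN (needs more input) and EOF (fully drained) are expected */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}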
Example #5
struct enc_handle *encode_open(struct enc_param param)
{
	struct enc_handle *handle = malloc(sizeof(struct enc_handle));
	if (!handle)
	{
		printf("--- malloc enc handle failed\n");
		return NULL;
	}

	CLEAR(*handle);
	handle->codec = NULL;
	handle->ctx = NULL;
	handle->frame = NULL;
	handle->inbuffer = NULL;
	handle->inbufsize = 0;
	handle->frame_counter = 0;
	handle->params.src_picwidth = param.src_picwidth;
	handle->params.src_picheight = param.src_picheight;
	handle->params.enc_picwidth = param.enc_picwidth;
	handle->params.enc_picheight = param.enc_picheight;
	handle->params.fps = param.fps;
	handle->params.bitrate = param.bitrate;
	handle->params.gop = param.gop;
	handle->params.chroma_interleave = param.chroma_interleave;

	avcodec_register_all();
	handle->codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!handle->codec)
	{
		printf("--- H264 codec not found\n");
		goto err0;
	}

	handle->ctx = avcodec_alloc_context3(handle->codec);
	if (!handle->ctx)
	{
		printf("--- Could not allocate video codec context\n");
		goto err0;
	}

	handle->ctx->bit_rate = handle->params.bitrate * 1000;    // kbps to bps
	handle->ctx->width = handle->params.src_picwidth;
	handle->ctx->height = handle->params.src_picheight;
	handle->ctx->time_base = (AVRational){ 1, handle->params.fps };    // frames per second
	handle->ctx->gop_size = handle->params.gop;
	handle->ctx->max_b_frames = 1;
	handle->ctx->pix_fmt = AV_PIX_FMT_YUV420P;
//	handle->ctx->thread_count = 1;
	// eliminate frame delay!
	av_opt_set(handle->ctx->priv_data, "preset", "ultrafast", 0);
	av_opt_set(handle->ctx->priv_data, "tune", "zerolatency", 0);
	av_opt_set(handle->ctx->priv_data, "x264opts",
			"no-mbtree:sliced-threads:sync-lookahead=0", 0);

	if (avcodec_open2(handle->ctx, handle->codec, NULL) < 0)
	{
		printf("--- Could not open codec\n");
		goto err1;
	}

	handle->frame = av_frame_alloc();
	if (!handle->frame)
	{
		printf("--- Could not allocate video frame\n");
		goto err2;
	}

	handle->frame->format = handle->ctx->pix_fmt;
	handle->frame->width = handle->ctx->width;
	handle->frame->height = handle->ctx->height;
	handle->inbufsize = avpicture_get_size(AV_PIX_FMT_YUV420P,
			handle->params.src_picwidth, handle->params.src_picheight);
	handle->inbuffer = av_malloc(handle->inbufsize);
	if (!handle->inbuffer)
	{
		printf("--- Could not allocate inbuffer\n");
		goto err3;
	}
	avpicture_fill((AVPicture *) handle->frame, handle->inbuffer,
			AV_PIX_FMT_YUV420P, handle->params.src_picwidth,
			handle->params.src_picheight);

	av_init_packet(&handle->packet);
	handle->packet.data = NULL;
	handle->packet.size = 0;

	printf("+++ Encode Opened\n");
	return handle;

err3:
	av_frame_free(&handle->frame);
err2:
	avcodec_close(handle->ctx);
err1:
	av_free(handle->ctx);
err0:
	free(handle);
	return NULL;
}
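A hypothetical caller, assuming struct enc_param carries exactly the fields mirrored into handle->params above:

static struct enc_handle *open_default_encoder(void)
{
	struct enc_param p = {
		.src_picwidth  = 640,
		.src_picheight = 480,
		.enc_picwidth  = 640,
		.enc_picheight = 480,
		.fps     = 25,
		.bitrate = 1000,   /* kbps; encode_open() scales this to bps */
		.gop     = 12,
		.chroma_interleave = 0,
	};
	return encode_open(p);
}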
Example #6
bool Remux::writeHeader()
{

	AVOutputFormat *ofmt = NULL;
	AVStream *out_stream;
	AVCodec *encoder;
	int ret;
	ofmt = ofmt_ctx->oformat;
	for (int i = 0; i < ifmt_ctx->nb_streams; i++) {
		AVStream *in_stream = ifmt_ctx->streams[i];
		if (in_stream->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			videoIndex = i;
			AVCodec *pCodec = avcodec_find_encoder(in_stream->codec->codec_id);
			out_stream = avformat_new_stream(ofmt_ctx, pCodec);
			if (!out_stream) {
				printf("Failed allocating output stream\n");
				ret = AVERROR_UNKNOWN;
				return false;
			}
			encCtx = out_stream->codec;
			encCtx->codec_id = in_stream->codec->codec_id;
			encCtx->codec_type = in_stream->codec->codec_type;
			encCtx->pix_fmt = in_stream->codec->pix_fmt;
			encCtx->width = in_stream->codec->width;
			encCtx->height = in_stream->codec->height;
			encCtx->flags = in_stream->codec->flags;
			encCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
			av_opt_set(encCtx->priv_data, "tune", "zerolatency", 0);
			ofmt_ctx->streams[i]->avg_frame_rate = in_stream->avg_frame_rate;
			if (in_stream->codec->time_base.den > 25)
			{
				encCtx->time_base = { 1, 25 };
				encCtx->pkt_timebase = { 1, 25 };
			}
			else{
				AVRational tmp = in_stream->avg_frame_rate;

				//encCtx->time_base = in_stream->codec->time_base;
				//encCtx->pkt_timebase = in_stream->codec->pkt_timebase;
				encCtx->time_base = { tmp.den, tmp.num };
			}
			AVDictionary *param = 0;
			if (encCtx->codec_id == AV_CODEC_ID_H264) {
				av_dict_set(&param, "preset", "slow", 0);
				//av_dict_set(&param, "profile", "main", 0);
			}
			//Without this call, the resulting video lacked its thumbnail and similar metadata
			ret = avcodec_open2(encCtx, pCodec, &param);
		}
		
	}
	/*
	//encCtx->extradata = new uint8_t[32];//给extradata成员参数分配内存
	//encCtx->extradata_size = 32;//extradata成员参数分配内存大小



	////给extradata成员参数设置值
	////00 00 00 01 
	//encCtx->extradata[0] = 0x00;
	//encCtx->extradata[1] = 0x00;
	//encCtx->extradata[2] = 0x00;
	//encCtx->extradata[3] = 0x01;

	////67 42 80 1e 
	//encCtx->extradata[4] = 0x67;
	//encCtx->extradata[5] = 0x42;
	//encCtx->extradata[6] = 0x80;
	//encCtx->extradata[7] = 0x1e;

	////88 8b 40 50 
	//encCtx->extradata[8] = 0x88;
	//encCtx->extradata[9] = 0x8b;
	//encCtx->extradata[10] = 0x40;
	//encCtx->extradata[11] = 0x50;

	////1e d0 80 00 
	//encCtx->extradata[12] = 0x1e;
	//encCtx->extradata[13] = 0xd0;
	//encCtx->extradata[14] = 0x80;
	//encCtx->extradata[15] = 0x00;

	////03 84 00 00 
	//encCtx->extradata[16] = 0x03;
	//encCtx->extradata[17] = 0x84;
	//encCtx->extradata[18] = 0x00;
	//encCtx->extradata[19] = 0x00;

	////af c8 02 00 
	//encCtx->extradata[20] = 0xaf;
	//encCtx->extradata[21] = 0xc8;
	//encCtx->extradata[22] = 0x02;
	//encCtx->extradata[23] = 0x00;

	////00 00 00 01 
	//encCtx->extradata[24] = 0x00;
	//encCtx->extradata[25] = 0x00;
	//encCtx->extradata[26] = 0x00;
	//encCtx->extradata[27] = 0x01;

	////68 ce 38 80
	//encCtx->extradata[28] = 0x68;
	//encCtx->extradata[29] = 0xce;
	//encCtx->extradata[30] = 0x38;
	//encCtx->extradata[31] = 0x80;*/
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&ofmt_ctx->pb, out_filename.c_str(), AVIO_FLAG_WRITE);
		if (ret < 0) {
			return false;
		}
	}
	/*
	out_stream->codec->codec_tag = 0;
	if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
		out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
		*/
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0){
		return false;
	}
	return true;
}
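A sketch of the dictionary-options pattern used at the avcodec_open2() call above: the encoder consumes the keys it recognizes and leaves the rest behind, which is worth checking.

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* open an encoder with options; warn about any key the encoder ignored */
static int open_with_options(AVCodecContext *ctx, const AVCodec *codec)
{
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "preset", "slow", 0);

    int ret = avcodec_open2(ctx, codec, &opts);

    AVDictionaryEntry *e = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX);
    if (e)
        fprintf(stderr, "option '%s' was not consumed by the encoder\n", e->key);
    av_dict_free(&opts);
    return ret;
}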
Example #7
AVStream* AVFormatWriter::AddVideoStream(void)
{
    AVCodecContext *c;
    AVStream *st;
    AVCodec *codec;

    st = avformat_new_stream(m_ctx, NULL);
    if (!st)
    {
        LOG(VB_RECORD, LOG_ERR,
            LOC + "AddVideoStream(): avformat_new_stream() failed");
        return NULL;
    }
    st->id = 0;

    c = st->codec;

    codec = avcodec_find_encoder(m_ctx->oformat->video_codec);
    if (!codec)
    {
        LOG(VB_RECORD, LOG_ERR,
            LOC + "AddVideoStream(): avcodec_find_encoder() failed");
        return NULL;
    }

    avcodec_get_context_defaults3(c, codec);

    c->codec                      = codec;
    c->codec_id                   = m_ctx->oformat->video_codec;
    c->codec_type                 = AVMEDIA_TYPE_VIDEO;

    c->bit_rate                   = m_videoBitrate;
    c->width                      = m_width;
    c->height                     = m_height;

    // c->sample_aspect_ratio.num    = (int)floor(m_aspect * 10000);
    // c->sample_aspect_ratio.den    = 10000;

    c->time_base                  = GetCodecTimeBase();

    st->time_base.den             = 90000;
    st->time_base.num             = 1;
    st->r_frame_rate.num          = 0;
    st->r_frame_rate.den          = 0;

    c->gop_size                   = m_keyFrameDist;
    c->pix_fmt                    = PIX_FMT_YUV420P;
    c->thread_count               = m_encodingThreadCount;
    c->thread_type                = FF_THREAD_SLICE;

    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
        c->max_b_frames          = 2;
    }
    else if (c->codec_id == CODEC_ID_MPEG1VIDEO)
    {
        c->mb_decision           = 2;
    }
    else if (c->codec_id == CODEC_ID_H264)
    {

        if ((c->width > 480) ||
            (c->bit_rate > 600000))
        {
            c->level = 31;
            av_opt_set(c->priv_data, "profile", "main", 0);
        }
        else
        {
            c->level = 30;
            av_opt_set(c->priv_data, "profile", "baseline", 0);
        }

        c->coder_type            = 0;
        c->max_b_frames          = 0;
        c->slices                = 8;

        c->flags                |= CODEC_FLAG_LOOP_FILTER;
        c->me_cmp               |= 1;
        c->me_method             = ME_HEX;
        c->me_subpel_quality     = 6;
        c->me_range              = 16;
        c->keyint_min            = 25;
        c->scenechange_threshold = 40;
        c->i_quant_factor        = 0.71;
        c->b_frame_strategy      = 1;
        c->qcompress             = 0.6;
        c->qmin                  = 10;
        c->qmax                  = 51;
        c->max_qdiff             = 4;
        c->refs                  = 3;
        c->trellis               = 0;

        av_opt_set(c, "partitions", "i8x8,i4x4,p8x8,b8x8", 0);
        av_opt_set_int(c, "direct-pred", 1, 0);
        av_opt_set_int(c, "rc-lookahead", 0, 0);
        av_opt_set_int(c, "fast-pskip", 1, 0);
        av_opt_set_int(c, "mixed-refs", 1, 0);
        av_opt_set_int(c, "8x8dct", 0, 0);
        av_opt_set_int(c, "weightb", 0, 0);
    }

    if(m_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
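The calls above mix av_opt_set(c, ...) and av_opt_set(c->priv_data, ...). A short sketch of the distinction, with hypothetical values:

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static int tune_x264(AVCodecContext *c)
{
    int ret;

    /* generic AVCodecContext option: lives on the context itself */
    if ((ret = av_opt_set_int(c, "refs", 3, 0)) < 0)
        return ret;

    /* libx264 private option: lives on priv_data */
    if ((ret = av_opt_set(c->priv_data, "profile", "main", 0)) < 0)
        return ret;

    /* AV_OPT_SEARCH_CHILDREN searches the context first, then children
     * such as priv_data, so one call can reach either kind */
    return av_opt_set(c, "preset", "medium", AV_OPT_SEARCH_CHILDREN);
}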
Example #8
HRESULT CLAVVideo::Filter(LAVFrame *pFrame)
{
  int ret = 0;
  BOOL bFlush = pFrame->flags & LAV_FRAME_FLAG_FLUSH;
  if (m_Decoder.IsInterlaced(FALSE) && m_settings.DeintMode != DeintMode_Disable
    && (m_settings.SWDeintMode == SWDeintMode_YADIF || m_settings.SWDeintMode == SWDeintMode_W3FDIF_Simple || m_settings.SWDeintMode == SWDeintMode_W3FDIF_Complex)
    && ((bFlush && m_pFilterGraph) || pFrame->format == LAVPixFmt_YUV420 || pFrame->format == LAVPixFmt_YUV422 || pFrame->format == LAVPixFmt_NV12)) {
    AVPixelFormat ff_pixfmt = (pFrame->format == LAVPixFmt_YUV420) ? AV_PIX_FMT_YUV420P : (pFrame->format == LAVPixFmt_YUV422) ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_NV12;

    if (!bFlush && (!m_pFilterGraph || pFrame->format != m_filterPixFmt || pFrame->width != m_filterWidth || pFrame->height != m_filterHeight)) {
      DbgLog((LOG_TRACE, 10, L":Filter()(init) Initializing YADIF deinterlacing filter..."));
      if (m_pFilterGraph) {
        avfilter_graph_free(&m_pFilterGraph);
        m_pFilterBufferSrc = nullptr;
        m_pFilterBufferSink = nullptr;
      }

      m_filterPixFmt = pFrame->format;
      m_filterWidth  = pFrame->width;
      m_filterHeight = pFrame->height;

      char args[512];
      enum AVPixelFormat pix_fmts[3];

      if (ff_pixfmt == AV_PIX_FMT_NV12) {
        pix_fmts[0] = AV_PIX_FMT_NV12;
        pix_fmts[1] = AV_PIX_FMT_YUV420P;
      } else {
        pix_fmts[0] = ff_pixfmt;
        pix_fmts[1] = AV_PIX_FMT_NONE;
      }
      pix_fmts[2] = AV_PIX_FMT_NONE;

      AVFilter *buffersrc  = avfilter_get_by_name("buffer");
      AVFilter *buffersink = avfilter_get_by_name("buffersink");
      AVFilterInOut *outputs = avfilter_inout_alloc();
      AVFilterInOut *inputs  = avfilter_inout_alloc();

      m_pFilterGraph = avfilter_graph_alloc();

      av_opt_set(m_pFilterGraph, "thread_type", "slice", AV_OPT_SEARCH_CHILDREN);
      av_opt_set_int(m_pFilterGraph, "threads", FFMAX(1, av_cpu_count() / 2), AV_OPT_SEARCH_CHILDREN);

      // 0/0 is not a valid value for avfilter, make sure it doesn't happen
      AVRational aspect_ratio = pFrame->aspect_ratio;
      if (aspect_ratio.num == 0 || aspect_ratio.den == 0)
        aspect_ratio = { 0, 1 };

      _snprintf_s(args, sizeof(args), "video_size=%dx%d:pix_fmt=%s:time_base=1/10000000:pixel_aspect=%d/%d", pFrame->width, pFrame->height, av_get_pix_fmt_name(ff_pixfmt), aspect_ratio.num, aspect_ratio.den);
      ret = avfilter_graph_create_filter(&m_pFilterBufferSrc, buffersrc, "in", args, nullptr, m_pFilterGraph);
      if (ret < 0) {
        DbgLog((LOG_TRACE, 10, L"::Filter()(init) Creating the input buffer filter failed with code %d", ret));
        avfilter_graph_free(&m_pFilterGraph);
        goto deliver;
      }

      ret = avfilter_graph_create_filter(&m_pFilterBufferSink, buffersink, "out", nullptr, nullptr, m_pFilterGraph);
      if (ret < 0) {
        DbgLog((LOG_TRACE, 10, L"::Filter()(init) Creating the buffer sink filter failed with code %d", ret));
        avfilter_free(m_pFilterBufferSrc);
        m_pFilterBufferSrc = nullptr;
        avfilter_graph_free(&m_pFilterGraph);
        goto deliver;
      }

      /* set allowed pixfmts on the output */
      av_opt_set_int_list(m_pFilterBufferSink->priv, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, 0);

      /* Endpoints for the filter graph. */
      outputs->name       = av_strdup("in");
      outputs->filter_ctx = m_pFilterBufferSrc;
      outputs->pad_idx    = 0;
      outputs->next       = nullptr;

      inputs->name       = av_strdup("out");
      inputs->filter_ctx = m_pFilterBufferSink;
      inputs->pad_idx    = 0;
      inputs->next       = nullptr;

      if (m_settings.SWDeintMode == SWDeintMode_YADIF)
        _snprintf_s(args, sizeof(args), "yadif=mode=%s:parity=auto:deint=interlaced", (m_settings.SWDeintOutput == DeintOutput_FramePerField) ? "send_field" : "send_frame");
      else if (m_settings.SWDeintMode == SWDeintMode_W3FDIF_Simple)
        _snprintf_s(args, sizeof(args), "w3fdif=filter=simple:deint=interlaced");
      else if (m_settings.SWDeintMode == SWDeintMode_W3FDIF_Complex)
        _snprintf_s(args, sizeof(args), "w3fdif=filter=complex:deint=interlaced");
      else
        ASSERT(0);

      if ((ret = avfilter_graph_parse_ptr(m_pFilterGraph, args, &inputs, &outputs, nullptr)) < 0) {
        DbgLog((LOG_TRACE, 10, L"::Filter()(init) Parsing the graph failed with code %d", ret));
        avfilter_graph_free(&m_pFilterGraph);
        goto deliver;
      }

      if ((ret = avfilter_graph_config(m_pFilterGraph, nullptr)) < 0) {
        DbgLog((LOG_TRACE, 10, L"::Filter()(init) Configuring the graph failed with code %d", ret));
        avfilter_graph_free(&m_pFilterGraph);
        goto deliver;
      }

      DbgLog((LOG_TRACE, 10, L":Filter()(init) avfilter Initialization complete"));
    }

    if (!m_pFilterGraph)
      goto deliver;

    if (pFrame->direct) {
      HRESULT hr = DeDirectFrame(pFrame, true);
      if (FAILED(hr)) {
        ReleaseFrame(&pFrame);
        return hr;
      }
    }

    AVFrame *in_frame = nullptr;
    BOOL refcountedFrame = (m_Decoder.HasThreadSafeBuffers() == S_OK);
    // When flushing, we feed a NULL frame
    if (!bFlush) {
      in_frame = av_frame_alloc();

      for (int i = 0; i < 4; i++) {
        in_frame->data[i] = pFrame->data[i];
        in_frame->linesize[i] = (int)pFrame->stride[i];
      }

      in_frame->width               = pFrame->width;
      in_frame->height              = pFrame->height;
      in_frame->format              = ff_pixfmt;
      in_frame->pts                 = pFrame->rtStart;
      in_frame->interlaced_frame    = pFrame->interlaced;
      in_frame->top_field_first     = pFrame->tff;
      in_frame->sample_aspect_ratio = pFrame->aspect_ratio;

      if (refcountedFrame) {
        AVBufferRef *pFrameBuf = av_buffer_create(nullptr, 0, lav_free_lavframe, pFrame, 0);
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get((AVPixelFormat)in_frame->format);
        int planes = (in_frame->format == AV_PIX_FMT_NV12) ? 2 : desc->nb_components;

        for (int i = 0; i < planes; i++) {
          int h_shift    = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
          int plane_size = (in_frame->height >> h_shift) * in_frame->linesize[i];

          AVBufferRef *planeRef = av_buffer_ref(pFrameBuf);
          in_frame->buf[i] = av_buffer_create(in_frame->data[i], plane_size, lav_unref_frame, planeRef, AV_BUFFER_FLAG_READONLY);
        }
        av_buffer_unref(&pFrameBuf);
      }

      m_FilterPrevFrame = *pFrame;
      memset(m_FilterPrevFrame.data, 0, sizeof(m_FilterPrevFrame.data));
      m_FilterPrevFrame.destruct = nullptr;
    } else {
Example #9
void encode_loop(const char *filename, long long int frames, unsigned int delay,
                 int framerate)
{
    FILE *f;

    /* abort if file already exists */
    if ((f = fopen(filename, "r")) != NULL) {
        fclose(f);
        fprintf(stderr, "error: file '%s' already exists\n", filename);
        exit(1);
    }

    AVCodec *codec;
    AVCodecContext *c= NULL;
    int ret, got_output;
    unsigned int i = 0;
    AVFrame *frame;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }
    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }
    /* put sample parameters */
  //c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width  = get_frame_width();
    c->height = get_frame_height();
    /* frames per second */
    c->time_base = (AVRational){1,framerate};
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    av_opt_set(c->priv_data, "preset", "slow", 0);
    av_opt_set_int(c, "crf", 20, AV_OPT_SEARCH_CHILDREN);

    /* open codec */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    /* open file */
    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* allocate video frame */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;
    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }

    /* will we run forever? */
    int inf = !frames;

    while (1) {
        if (CAUGHT_SIGINT)
            break;

        if (!inf && --frames < 0)
            break;

        usleep(delay);

        av_init_packet(&pkt);
        pkt.data = NULL;    // packet data will be allocated by the encoder
        pkt.size = 0;
        fflush(stdout);

        struct SwsContext *ctx =
            sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
                           c->width, c->height, AV_PIX_FMT_YUV420P,
                           0, 0, 0, 0);

        unsigned char *rgb_buf = grab_frame();
        const uint8_t *data_in[1] = { rgb_buf };
        int in_linesize[1] = { 3 * c->width };   /* tightly packed RGB24 rows */
        sws_scale(ctx, data_in, in_linesize, 0, c->height,
                  frame->data, frame->linesize);

        frame->pts = i;
        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }
        if (got_output) {
            printf("Wrote frame %d (size=%d)\n", i++, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }

        free(rgb_buf);
        sws_freeContext(ctx);
    }
    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);
        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }
        if (got_output) {
            printf("Wrote frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }
    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);
    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    av_frame_free(&frame);
    printf("\n");
}
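One note on the loop above: it allocates and frees an SwsContext on every frame. A sketch of the usual fix with sws_getCachedContext(), which reuses the context while the parameters stay the same:

#include <libavutil/frame.h>
#include <libswscale/swscale.h>

/* convert one packed RGB24 buffer to YUV420P, reusing a cached context;
 * pass NULL the first time and sws_freeContext() the result once at the end */
static struct SwsContext *convert_rgb_frame(struct SwsContext *cached,
                                            const uint8_t *rgb, AVFrame *frame,
                                            int width, int height)
{
    cached = sws_getCachedContext(cached,
                                  width, height, AV_PIX_FMT_RGB24,
                                  width, height, AV_PIX_FMT_YUV420P,
                                  0, NULL, NULL, NULL);
    const uint8_t *src[1] = { rgb };
    int src_linesize[1] = { 3 * width };
    sws_scale(cached, src, src_linesize, 0, height,
              frame->data, frame->linesize);
    return cached;
}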
Example #10
RawPixelSource::RawPixelSource(UsageEnvironment& env,
                               Frame* content,
							   int avgBitRate,
							   bool robustSyncing)
	:
	FramedSource(env), img_convert_ctx(NULL), content(content), /*encodeBarrier(2),*/ destructing(false), lastPTS(0), robustSyncing(robustSyncing)
{

	gettimeofday(&prevtime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead.
	if (referenceCount == 0)
	{
		// Any global initialization of the device would be done here:
		//%%% TO BE WRITTEN %%%
	}

	// Any instance-specific initialization of the device would be done here:

	++referenceCount;
	//myfile = fopen("/Users/tiborgoldschwendt/Desktop/Logs/deviceglxgears.log", "w");

	// initialize frame pool
	for (int i = 0; i < 1; i++)
	{
		AVFrame* frame = av_frame_alloc();
		if (!frame)
		{
			fprintf(stderr, "Could not allocate video frame\n");
			exit(1);
		}
		frame->format = content->getFormat();
		frame->width  = content->getWidth();
		frame->height = content->getHeight();

		/* the image can be allocated by any means and av_image_alloc() is
		* just the most convenient way if av_malloc() is to be used */
		if (av_image_alloc(frame->data, frame->linesize, frame->width, frame->height,
			content->getFormat(), 32) < 0)
		{
			fprintf(stderr, "Could not allocate raw picture buffer\n");
			abort();
		}

		framePool.push(frame);
	}

	for (int i = 0; i < 1; i++)
	{
		AVPacket pkt;
		av_init_packet(&pkt);
		pktPool.push(pkt);
	}

	// Initialize codec and encoder
	AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!codec)
	{
		fprintf(stderr, "Codec not found\n");
		exit(1);
	}

	codecContext = avcodec_alloc_context3(codec);

	if (!codecContext)
	{
		fprintf(stderr, "could not allocate video codec context\n");
		exit(1);
	}

	/* put sample parameters */
	codecContext->bit_rate = avgBitRate;
	/* resolution must be a multiple of two */
	codecContext->width = content->getWidth();
	codecContext->height = content->getHeight();
	/* frames per second */
	codecContext->time_base = av_make_q(1, FPS);
	codecContext->gop_size = 20; /* emit one intra frame every twenty frames */
	codecContext->max_b_frames = 0;
	codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
	//codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

	av_opt_set(codecContext->priv_data, "preset", PRESET_VAL, 0);
	av_opt_set(codecContext->priv_data, "tune", TUNE_VAL, 0);
	av_opt_set(codecContext->priv_data, "slice-max-size", "2000", 0);

	/* open it */
	if (avcodec_open2(codecContext, codec, NULL) < 0)
	{
		fprintf(stderr, "could not open codec\n");
		exit(1);
	}

	// We arrange here for our "deliverFrame" member function to be called
	// whenever the next frame of data becomes available from the device.
	//
	// If the device can be accessed as a readable socket, then one easy way to do this is using a call to
	//     envir().taskScheduler().turnOnBackgroundReadHandling( ... )
	// (See examples of this call in the "liveMedia" directory.)
	//
	// If, however, the device *cannot* be accessed as a readable socket, then instead we can implement it using 'event triggers':
	// Create an 'event trigger' for this device (if it hasn't already been done):
	eventTriggerId = envir().taskScheduler().createEventTrigger(&deliverFrame0);

	//std::cout << this << ": eventTriggerId: " << eventTriggerId  << std::endl;

	frameContentThread = boost::thread(boost::bind(&RawPixelSource::frameContentLoop, this));

	encodeFrameThread  = boost::thread(boost::bind(&RawPixelSource::encodeFrameLoop,  this));

	//eventThread        = boost::thread(boost::bind(&RawPixelSource::eventLoop, this));

	lastFrameTime = av_gettime();
}
Example #11
/* "user interface" functions */
static void dump_stream_format(AVFormatContext *ic, int i,
                               int index, int is_output)
{
    char buf[256];
    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
    char *separator = ic->dump_separator;
    AVCodecContext *avctx;
    int ret;

    avctx = avcodec_alloc_context3(NULL);
    if (!avctx)
        return;

    ret = avcodec_parameters_to_context(avctx, st->codecpar);
    if (ret < 0) {
        avcodec_free_context(&avctx);
        return;
    }

    // Fields which are missing from AVCodecParameters need to be taken from the AVCodecContext
    avctx->properties = st->codec->properties;
    avctx->codec      = st->codec->codec;
    avctx->qmin       = st->codec->qmin;
    avctx->qmax       = st->codec->qmax;
    avctx->coded_width  = st->codec->coded_width;
    avctx->coded_height = st->codec->coded_height;

    if (separator)
        av_opt_set(avctx, "dump_separator", separator, 0);
    avcodec_string(buf, sizeof(buf), avctx, is_output);
    avcodec_free_context(&avctx);

    av_log(NULL, AV_LOG_INFO, "    Stream #%d:%d", index, i);

    /* the pid is an important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
    if (lang)
        av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
    av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames,
           st->time_base.num, st->time_base.den);
    av_log(NULL, AV_LOG_INFO, ": %s", buf);

    if (st->sample_aspect_ratio.num &&
        av_cmp_q(st->sample_aspect_ratio, st->codecpar->sample_aspect_ratio)) {
        AVRational display_aspect_ratio;
        av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                  st->codecpar->width  * (int64_t)st->sample_aspect_ratio.num,
                  st->codecpar->height * (int64_t)st->sample_aspect_ratio.den,
                  1024 * 1024);
        av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
               st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
               display_aspect_ratio.num, display_aspect_ratio.den);
    }

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int fps = st->avg_frame_rate.den && st->avg_frame_rate.num;
        int tbr = st->r_frame_rate.den && st->r_frame_rate.num;
        int tbn = st->time_base.den && st->time_base.num;
        int tbc = st->codec->time_base.den && st->codec->time_base.num;

        if (fps || tbr || tbn || tbc)
            av_log(NULL, AV_LOG_INFO, "%s", separator);

        if (fps)
            print_fps(av_q2d(st->avg_frame_rate), tbr || tbn || tbc ? "fps, " : "fps");
        if (tbr)
            print_fps(av_q2d(st->r_frame_rate), tbn || tbc ? "tbr, " : "tbr");
        if (tbn)
            print_fps(1 / av_q2d(st->time_base), tbc ? "tbn, " : "tbn");
        if (tbc)
            print_fps(1 / av_q2d(st->codec->time_base), "tbc");
    }

    if (st->disposition & AV_DISPOSITION_DEFAULT)
        av_log(NULL, AV_LOG_INFO, " (default)");
    if (st->disposition & AV_DISPOSITION_DUB)
        av_log(NULL, AV_LOG_INFO, " (dub)");
    if (st->disposition & AV_DISPOSITION_ORIGINAL)
        av_log(NULL, AV_LOG_INFO, " (original)");
    if (st->disposition & AV_DISPOSITION_COMMENT)
        av_log(NULL, AV_LOG_INFO, " (comment)");
    if (st->disposition & AV_DISPOSITION_LYRICS)
        av_log(NULL, AV_LOG_INFO, " (lyrics)");
    if (st->disposition & AV_DISPOSITION_KARAOKE)
        av_log(NULL, AV_LOG_INFO, " (karaoke)");
    if (st->disposition & AV_DISPOSITION_FORCED)
        av_log(NULL, AV_LOG_INFO, " (forced)");
    if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
    if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (visual impaired)");
    if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
        av_log(NULL, AV_LOG_INFO, " (clean effects)");
    if (st->disposition & AV_DISPOSITION_DESCRIPTIONS)
        av_log(NULL, AV_LOG_INFO, " (descriptions)");
    if (st->disposition & AV_DISPOSITION_DEPENDENT)
        av_log(NULL, AV_LOG_INFO, " (dependent)");
    av_log(NULL, AV_LOG_INFO, "\n");

    dump_metadata(NULL, st->metadata, "    ");

    dump_sidedata(NULL, st, "    ");
}
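dump_stream_format() is the internal worker behind the public av_dump_format(); a minimal sketch of reaching it from user code:

#include <libavformat/avformat.h>

static int show_streams(const char *path)
{
    AVFormatContext *ic = NULL;
    int ret = avformat_open_input(&ic, path, NULL, NULL);
    if (ret < 0)
        return ret;
    ret = avformat_find_stream_info(ic, NULL);
    if (ret >= 0)
        av_dump_format(ic, 0, path, 0);   /* is_output = 0: dump as input */
    avformat_close_input(&ic);
    return ret < 0 ? ret : 0;
}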
Example #12
FFmpegVideo::FFmpegVideo()
{
    avcodec_register_all();
    // Encoding

    encoding_codec = NULL ;
    encoding_frame_buffer = NULL ;
    encoding_context = NULL ;

    //AVCodecID codec_id = AV_CODEC_ID_H264 ;
    //AVCodecID codec_id = AV_CODEC_ID_MPEG2VIDEO;
#if LIBAVCODEC_VERSION_MAJOR < 54
    CodecID codec_id = CODEC_ID_MPEG4;
#else
    AVCodecID codec_id = AV_CODEC_ID_MPEG4;
#endif

    /* find the video encoder */
    encoding_codec = avcodec_find_encoder(codec_id);

    if (!encoding_codec) std::cerr << "AV codec not found for codec id " << std::endl;
    if (!encoding_codec) throw std::runtime_error("AV codec not found for codec id ") ;

    encoding_context = avcodec_alloc_context3(encoding_codec);

    if (!encoding_context) std::cerr << "AV: Could not allocate video codec encoding context" << std::endl;
    if (!encoding_context) throw std::runtime_error("AV: Could not allocate video codec encoding context");

    /* put sample parameters */
    encoding_context->bit_rate = 10*1024 ; // default bitrate is 10 kbit/s
    encoding_context->bit_rate_tolerance = encoding_context->bit_rate ;

#ifdef USE_VARIABLE_BITRATE
    encoding_context->rc_min_rate = 0;
    encoding_context->rc_max_rate = 10*1024;//encoding_context->bit_rate;
    encoding_context->rc_buffer_size = 10*1024*1024;
    encoding_context->rc_initial_buffer_occupancy = (int) ( 0.9 * encoding_context->rc_buffer_size);
    encoding_context->rc_max_available_vbv_use = 1.0;
    encoding_context->rc_min_vbv_overflow_use = 0.0;
#else
    encoding_context->rc_min_rate = 0;
    encoding_context->rc_max_rate = 0;
    encoding_context->rc_buffer_size = 0;
#endif
    if (encoding_codec->capabilities & CODEC_CAP_TRUNCATED)
        encoding_context->flags |= CODEC_FLAG_TRUNCATED;
    encoding_context->flags |= CODEC_FLAG_PSNR; // report peak signal-to-noise ratio
    encoding_context->i_quant_factor = 0.769f;
    encoding_context->b_quant_factor = 1.4f;
    encoding_context->time_base.num = 1;
    encoding_context->time_base.den = 15;//framesPerSecond;
    encoding_context->qmin =  1;
    encoding_context->qmax = 51;
    encoding_context->max_qdiff = 4;

    //encoding_context->me_method = ME_HEX;
    //encoding_context->max_b_frames = 4;
    //encoding_context->flags |= CODEC_FLAG_LOW_DELAY;	// MPEG2 only
    //encoding_context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_P4X4 | X264_PART_B8X8;
    //encoding_context->crf = 0.0f;
    //encoding_context->cqp = 26;

    /* resolution must be a multiple of two */
    encoding_context->width = 640;//176;
    encoding_context->height = 480;//144;
    /* frames per second */
    encoding_context->time_base = av_make_q(1, 25);
    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    encoding_context->gop_size = 100;
    //encoding_context->max_b_frames = 1;
#if LIBAVCODEC_VERSION_MAJOR < 54
    encoding_context->pix_fmt = PIX_FMT_YUV420P; //context->pix_fmt = PIX_FMT_RGB24;
    if (codec_id == CODEC_ID_H264) {
#else
    encoding_context->pix_fmt = AV_PIX_FMT_YUV420P; //context->pix_fmt = AV_PIX_FMT_RGB24;
    if (codec_id == AV_CODEC_ID_H264) {
#endif
        av_opt_set(encoding_context->priv_data, "preset", "slow", 0);
    }

    /* open it */
    if (avcodec_open2(encoding_context, encoding_codec, NULL) < 0)
    {
        std::cerr << "AV: Could not open codec context. Something's wrong." << std::endl;
        throw std::runtime_error( "AV: Could not open codec context. Something's wrong.");
    }

#if (LIBAVCODEC_VERSION_MAJOR < 57) | (LIBAVCODEC_VERSION_MAJOR == 57 && LIBAVCODEC_VERSION_MINOR <3 )
    encoding_frame_buffer = avcodec_alloc_frame() ;//(AVFrame*)malloc(sizeof(AVFrame)) ;
#else
    encoding_frame_buffer = av_frame_alloc() ;
#endif

    if(!encoding_frame_buffer) std::cerr << "AV: could not allocate frame buffer." << std::endl;
    if(!encoding_frame_buffer)
        throw std::runtime_error("AV: could not allocate frame buffer.") ;

    encoding_frame_buffer->format = encoding_context->pix_fmt;
    encoding_frame_buffer->width  = encoding_context->width;
    encoding_frame_buffer->height = encoding_context->height;

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */

    int ret = av_image_alloc(encoding_frame_buffer->data, encoding_frame_buffer->linesize,
                             encoding_context->width, encoding_context->height, encoding_context->pix_fmt, 32);

    if (ret < 0) std::cerr << "AV: Could not allocate raw picture buffer" << std::endl;
    if (ret < 0)
        throw std::runtime_error("AV: Could not allocate raw picture buffer");

    encoding_frame_count = 0 ;

    // Decoding
    decoding_codec = avcodec_find_decoder(codec_id);

    if (!decoding_codec) std::cerr << "AV codec not found for codec id " << std::endl;
    if (!decoding_codec)
        throw("AV codec not found for codec id ") ;

    decoding_context = avcodec_alloc_context3(decoding_codec);

    if(!decoding_context) std::cerr << "AV: Could not allocate video codec decoding context" << std::endl;
    if(!decoding_context)
        throw std::runtime_error("AV: Could not allocate video codec decoding context");

    decoding_context->width = encoding_context->width;
    decoding_context->height = encoding_context->height;
#if LIBAVCODEC_VERSION_MAJOR < 54
    decoding_context->pix_fmt = PIX_FMT_YUV420P;
#else
    decoding_context->pix_fmt = AV_PIX_FMT_YUV420P;
#endif

    if(decoding_codec->capabilities & CODEC_CAP_TRUNCATED)
        decoding_context->flags |= CODEC_FLAG_TRUNCATED; // we do not send complete frames
    //we can receive truncated frames
    decoding_context->flags2 |= CODEC_FLAG2_CHUNKS;

    AVDictionary* dictionary = NULL;
    if(avcodec_open2(decoding_context, decoding_codec, &dictionary) < 0)
    {
        std::cerr << "AV codec open action failed! " << std::endl;
        throw("AV codec open action failed! ") ;
    }

    //decoding_frame_buffer = avcodec_alloc_frame() ;//(AVFrame*)malloc(sizeof(AVFrame)) ;
    decoding_frame_buffer = av_frame_alloc() ;

    av_init_packet(&decoding_buffer);
    decoding_buffer.data = NULL ;
    decoding_buffer.size = 0 ;

    //ret = av_image_alloc(decoding_frame_buffer->data, decoding_frame_buffer->linesize, decoding_context->width, decoding_context->height, decoding_context->pix_fmt, 32);

    //if (ret < 0)
    //throw std::runtime_error("AV: Could not allocate raw picture buffer");

    // debug
#ifdef DEBUG_MPEG_VIDEO
    std::cerr << "Dumping captured data to file tmpvideo.mpg" << std::endl;
    encoding_debug_file = fopen("tmpvideo.mpg","w") ;
#endif
}

FFmpegVideo::~FFmpegVideo()
{
    avcodec_free_context(&encoding_context);
    avcodec_free_context(&decoding_context);
    av_frame_free(&encoding_frame_buffer);
    av_frame_free(&decoding_frame_buffer);
}

#define MAX_FFMPEG_ENCODING_BITRATE 81920

bool FFmpegVideo::encodeData(const QImage& image, uint32_t target_encoding_bitrate, RsVOIPDataChunk& voip_chunk)
{
#ifdef DEBUG_MPEG_VIDEO
	std::cerr << "Encoding frame of size " << image.width() << "x" << image.height() << ", resized to " << encoding_frame_buffer->width << "x" << encoding_frame_buffer->height << " : ";
#endif
	QImage input ;

    if(target_encoding_bitrate > MAX_FFMPEG_ENCODING_BITRATE)
    {
        std::cerr << "Max encodign bitrate eexceeded. Capping to " << MAX_FFMPEG_ENCODING_BITRATE << std::endl;
        target_encoding_bitrate = MAX_FFMPEG_ENCODING_BITRATE ;
    }
	//encoding_context->bit_rate = target_encoding_bitrate;
	encoding_context->rc_max_rate = target_encoding_bitrate;
	//encoding_context->bit_rate_tolerance = target_encoding_bitrate;

	if(image.width() != encoding_frame_buffer->width || image.height() != encoding_frame_buffer->height)
		input = image.scaled(QSize(encoding_frame_buffer->width,encoding_frame_buffer->height),Qt::IgnoreAspectRatio,Qt::SmoothTransformation) ;
	else
		input = image ;

	/* convert the input image to YUV420P, one 2x2 block (4 luma samples, 1 chroma pair) at a time */
	for (int y = 0; y < encoding_context->height/2; y++)
		for (int x = 0; x < encoding_context->width/2; x++)
		{
			QRgb pix00 = input.pixel(QPoint(2*x+0,2*y+0)) ;
			QRgb pix01 = input.pixel(QPoint(2*x+0,2*y+1)) ;
			QRgb pix10 = input.pixel(QPoint(2*x+1,2*y+0)) ;
			QRgb pix11 = input.pixel(QPoint(2*x+1,2*y+1)) ;

			int R00 = (pix00 >> 16) & 0xff ; int G00 = (pix00 >>  8) & 0xff ; int B00 = (pix00 >>  0) & 0xff ;
			int R01 = (pix01 >> 16) & 0xff ; int G01 = (pix01 >>  8) & 0xff ; int B01 = (pix01 >>  0) & 0xff ;
			int R10 = (pix10 >> 16) & 0xff ; int G10 = (pix10 >>  8) & 0xff ; int B10 = (pix10 >>  0) & 0xff ;
			int R11 = (pix11 >> 16) & 0xff ; int G11 = (pix11 >>  8) & 0xff ; int B11 = (pix11 >>  0) & 0xff ;

			int Y00 =  (0.257 * R00) + (0.504 * G00) + (0.098 * B00) + 16  ;
			int Y01 =  (0.257 * R01) + (0.504 * G01) + (0.098 * B01) + 16  ;
			int Y10 =  (0.257 * R10) + (0.504 * G10) + (0.098 * B10) + 16  ;
			int Y11 =  (0.257 * R11) + (0.504 * G11) + (0.098 * B11) + 16  ;

			float R = 0.25*(R00+R01+R10+R11) ;
			float G = 0.25*(G00+G01+G10+G11) ;
			float B = 0.25*(B00+B01+B10+B11) ;

			int U = -(0.148 * R) - (0.291 * G) + (0.439 * B) + 128 ;
			int V =  (0.439 * R) - (0.368 * G) - (0.071 * B) + 128 ;

			encoding_frame_buffer->data[0][(2*y+0) * encoding_frame_buffer->linesize[0] + 2*x+0] = std::min(255,std::max(0,Y00)); // Y
			encoding_frame_buffer->data[0][(2*y+0) * encoding_frame_buffer->linesize[0] + 2*x+1] = std::min(255,std::max(0,Y01)); // Y
			encoding_frame_buffer->data[0][(2*y+1) * encoding_frame_buffer->linesize[0] + 2*x+0] = std::min(255,std::max(0,Y10)); // Y
			encoding_frame_buffer->data[0][(2*y+1) * encoding_frame_buffer->linesize[0] + 2*x+1] = std::min(255,std::max(0,Y11)); // Y

			encoding_frame_buffer->data[1][y * encoding_frame_buffer->linesize[1] + x] = std::min(255,std::max(0,U));// Cb
			encoding_frame_buffer->data[2][y * encoding_frame_buffer->linesize[2] + x] = std::min(255,std::max(0,V));// Cr
		}


	encoding_frame_buffer->pts = encoding_frame_count++;

	/* encode the image */

	int got_output = 0;

	AVPacket pkt ;
	av_init_packet(&pkt);
#if LIBAVCODEC_VERSION_MAJOR < 54
	pkt.size = avpicture_get_size(encoding_context->pix_fmt, encoding_context->width, encoding_context->height);
	pkt.data = (uint8_t*)av_malloc(pkt.size);

	//    do
	//    {
	int ret = avcodec_encode_video(encoding_context, pkt.data, pkt.size, encoding_frame_buffer) ;
	if (ret > 0) {
		got_output = ret;
	}
#else
	pkt.data = NULL;    // packet data will be allocated by the encoder
	pkt.size = 0;

	//    do
	//    {
	int ret = avcodec_encode_video2(encoding_context, &pkt, encoding_frame_buffer, &got_output) ;
#endif

	if (ret < 0)
	{
		std::cerr << "Error encoding frame!" << std::endl;
		return false ;
	}
	//        frame = NULL ;	// next attempts: do not encode anything. Do this to just flush the buffer
	//
	//    } while(got_output) ;

	if(got_output)
	{
		voip_chunk.data = rs_malloc(pkt.size + HEADER_SIZE) ;
		
		if(!voip_chunk.data)
			return false ;
        
		uint32_t flags = 0;

		((unsigned char *)voip_chunk.data)[0] =  VideoProcessor::VIDEO_PROCESSOR_CODEC_ID_MPEG_VIDEO       & 0xff ;
		((unsigned char *)voip_chunk.data)[1] = (VideoProcessor::VIDEO_PROCESSOR_CODEC_ID_MPEG_VIDEO >> 8) & 0xff ;
		((unsigned char *)voip_chunk.data)[2] = flags & 0xff ;
		((unsigned char *)voip_chunk.data)[3] = (flags >> 8) & 0xff ;

		memcpy(&((unsigned char*)voip_chunk.data)[HEADER_SIZE],pkt.data,pkt.size) ;

		voip_chunk.size = pkt.size + HEADER_SIZE;
		voip_chunk.type = RsVOIPDataChunk::RS_VOIP_DATA_TYPE_VIDEO ;

#ifdef DEBUG_MPEG_VIDEO
		std::cerr << "Output : " << pkt.size << " bytes." << std::endl;
		fwrite(pkt.data,1,pkt.size,encoding_debug_file) ;
		fflush(encoding_debug_file) ;
#endif
		av_free_packet(&pkt);

		return true ;
	}
	else
	{
Example #13
static int rtmp_http_open(URLContext *h, const char *uri, int flags)
{
    RTMP_HTTPContext *rt = h->priv_data;
    char headers[1024], url[1024];
    int ret, off = 0;

    av_url_split(NULL, 0, NULL, 0, rt->host, sizeof(rt->host), &rt->port,
                 NULL, 0, uri);

    /* This is the first request that is sent to the server in order to
     * register a client on the server and start a new session. The server
     * replies with a unique id (usually a number) that is used by the client
     * for all future requests.
     * Note: the reply doesn't contain a value for the polling interval.
     * A successful connect resets the consecutive index that is used
     * in the URLs. */
    if (rt->tls) {
        if (rt->port < 0)
            rt->port = RTMPTS_DEFAULT_PORT;
        ff_url_join(url, sizeof(url), "https", NULL, rt->host, rt->port, "/open/1");
    } else {
        if (rt->port < 0)
            rt->port = RTMPT_DEFAULT_PORT;
        ff_url_join(url, sizeof(url), "http", NULL, rt->host, rt->port, "/open/1");
    }

    /* alloc the http context */
    if ((ret = ffurl_alloc(&rt->stream, url, AVIO_FLAG_READ_WRITE, NULL)) < 0)
        goto fail;

    /* set options */
    snprintf(headers, sizeof(headers),
             "Cache-Control: no-cache\r\n"
             "Content-type: application/x-fcs\r\n"
             "User-Agent: Shockwave Flash\r\n");
    av_opt_set(rt->stream->priv_data, "headers", headers, 0);
    av_opt_set(rt->stream->priv_data, "multiple_requests", "1", 0);
    av_opt_set_bin(rt->stream->priv_data, "post_data", "", 1, 0);

    /* open the http context */
    if ((ret = ffurl_connect(rt->stream, NULL)) < 0)
        goto fail;

    /* read the server reply which contains a unique ID */
    for (;;) {
        ret = ffurl_read(rt->stream, rt->client_id + off, sizeof(rt->client_id) - off);
        if (!ret || ret == AVERROR_EOF)
            break;
        if (ret < 0)
            goto fail;
        off += ret;
        if (off == sizeof(rt->client_id)) {
            ret = AVERROR(EIO);
            goto fail;
        }
    }
    while (off > 0 && av_isspace(rt->client_id[off - 1]))
        off--;
    rt->client_id[off] = '\0';

    /* http context is now initialized */
    rt->initialized = 1;
    return 0;

fail:
    rtmp_http_close(h);
    return ret;
}
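From user code, the protocol options this function sets on rt->stream->priv_data are normally passed down as an options dictionary instead; a sketch with a hypothetical URL:

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int open_http_input(AVFormatContext **fmt, const char *url)
{
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "headers", "Cache-Control: no-cache\r\n", 0);
    av_dict_set(&opts, "multiple_requests", "1", 0);

    int ret = avformat_open_input(fmt, url, NULL, &opts);
    av_dict_free(&opts);   /* frees whatever was not consumed */
    return ret;
}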
Example #14
/**
  * Send one frame of the screen image.
  * The first four bytes carry the image size; from low byte to high byte they are
  * width_high, width_low, height_high, height_low.
  * The rest is the image data, 3 bytes per pixel, in R, G, B channel order.
  */
void MapThread::sendFrame()
{
    if(!started)
        return;

    if(!inited)
    {
        avcodec_register_all();

        c= NULL;
        pkt = new AVPacket;
        i = 0;

        /* find the mpeg1 video encoder */
        codec = avcodec_find_encoder(AV_CODEC_ID_MPEG1VIDEO);

        if (codec == 0)
        {
            exit(1);
        }

        c = avcodec_alloc_context3(codec);
        if (!c)
        {
            exit(1);
        }
        //c->bit_rate = 400000;
        c->width = dest_width;
        c->height = dest_height;

        AVRational ar = {1,25};
        c->time_base = ar;
        c->gop_size = 100;
        c->max_b_frames = 0;
        c->delay = 0;
        c->pix_fmt = AV_PIX_FMT_YUV420P;

        //av_opt_set(c->priv_data, "preset", "slow", 0);

        av_opt_set(c->priv_data, "preset", "superfast", 0);
        av_opt_set(c->priv_data, "tune", "zerolatency", 0);

        int re = avcodec_open2(c, codec, NULL);
        if (re < 0) {
            exit(1);
        }

        frame = av_frame_alloc();
        if (!frame) {
            exit(1);
        }
        frame->format = c->pix_fmt;
        frame->width  = c->width;
        frame->height = c->height;

        ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
        if (ret < 0) {
            exit(1);
        }
        inited = true;
    }


    if(mapSocket == 0)
    {
        return;
    }
    else if(mapSocket->isOpen() == false)
    {
        return;
    }
    else if(mapSocket->isWritable() == false)
    {
        return;
    }
    QImage image = Interface::grapScreen().toImage();
    image = image.scaled(QSize(dest_width, dest_height));

    av_init_packet(pkt);
    pkt->data = NULL;    // packet data will be allocated by the encoder
    pkt->size = 0;       // size must start at 0 when data is NULL

    for (int h = 0; h < c->height; h++)
    {
        for (int w = 0; w < c->width; w++)
        {
            QRgb rgb = image.pixel(w, h);

            int r = qRed(rgb);
            int g = qGreen(rgb);
            int b = qBlue(rgb);

            int dy = (( 66*r + 129*g +  25*b) >> 8) + 16;
            int du = ((-38*r -  74*g + 112*b) >> 8) + 128;
            int dv = ((112*r -  94*g -  18*b) >> 8) + 128;

            uchar yy = (uchar)dy;
            uchar uu = (uchar)du;
            uchar vv = (uchar)dv;

            frame->data[0][h * frame->linesize[0] + w] = yy;

            if(h % 2 == 0 && w % 2 == 0)
            {
                frame->data[1][h/2 * (frame->linesize[1]) + w/2] = uu;
                frame->data[2][h/2 * (frame->linesize[2]) + w/2] = vv;
            }

        }
    }

    frame->pts = i;

    ret = avcodec_encode_video2(c, pkt, frame, &got_output);

    if (ret < 0)
    {
        exit(1);
    }

    if (got_output)
    {
        int ss = pkt->size;
        writeAndBlock(mapSocket, pkt->data, ss);
        mapSocket->flush();

        av_free_packet(pkt);
    }

    i ++;
}
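The per-pixel math above is the usual integer approximation of the BT.601 limited-range RGB-to-YCbCr transform (Y in roughly [16,235], chroma centered on 128), with the chroma planes subsampled by keeping every second pixel of every second row. The same conversion as a standalone helper, for reference:

#include <stdint.h>

/* Integer BT.601 limited-range RGB -> YCbCr, same coefficients as above:
 *   Y  ~ (( 66 R + 129 G +  25 B) >> 8) + 16
 *   Cb ~ ((-38 R -  74 G + 112 B) >> 8) + 128
 *   Cr ~ ((112 R -  94 G -  18 B) >> 8) + 128 */
static void rgb_to_ycbcr601(int r, int g, int b,
                            uint8_t *y, uint8_t *cb, uint8_t *cr)
{
    *y  = (uint8_t)((( 66 * r + 129 * g +  25 * b) >> 8) + 16);
    *cb = (uint8_t)(((-38 * r -  74 * g + 112 * b) >> 8) + 128);
    *cr = (uint8_t)(((112 * r -  94 * g -  18 * b) >> 8) + 128);
}

A production converter would also add 128 before each shift for rounding and clamp the results to [0,255]; sws_scale() performs the conversion and the 4:2:0 downsampling in one call.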
Exemplo n.º 15
0
static int url_alloc_for_protocol (URLContext **puc, struct URLProtocol *up,
                                   const char *filename, int flags,
                                   const AVIOInterruptCB *int_cb)
{
    URLContext *uc;
    int err;

#if CONFIG_NETWORK
    if (up->flags & URL_PROTOCOL_FLAG_NETWORK && !ff_network_init())
        return AVERROR(EIO);
#endif
    uc = av_mallocz(sizeof(URLContext) + strlen(filename) + 1);
    if (!uc) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    uc->av_class = &ffurl_context_class;
    uc->filename = (char *) &uc[1];
    strcpy(uc->filename, filename);
    uc->prot = up;
    uc->flags = flags;
    uc->is_streamed = 0; /* default = not streamed */
    uc->max_packet_size = 0; /* default: stream file */
    if (up->priv_data_size) {
        uc->priv_data = av_mallocz(up->priv_data_size);
        if (up->priv_data_class) {
            int proto_len= strlen(up->name);
            char *start = strchr(uc->filename, ',');
            *(const AVClass**)uc->priv_data = up->priv_data_class;
            av_opt_set_defaults(uc->priv_data);
            if(!strncmp(up->name, uc->filename, proto_len) && uc->filename + proto_len == start){
                int ret= 0;
                char *p= start;
                char sep= *++p;
                char *key, *val;
                p++;
                while(ret >= 0 && (key= strchr(p, sep)) && p<key && (val = strchr(key+1, sep))){
                    *val= *key= 0;
                    ret= av_opt_set(uc->priv_data, p, key+1, 0);
                    if (ret == AVERROR_OPTION_NOT_FOUND)
                        av_log(uc, AV_LOG_ERROR, "Key '%s' not found.\n", p);
                    *val= *key= sep;
                    p= val+1;
                }
                if(ret<0 || p!=key){
                    av_log(uc, AV_LOG_ERROR, "Error parsing options string %s\n", start);
                    av_freep(&uc->priv_data);
                    av_freep(&uc);
                    goto fail;
                }
                memmove(start, key+1, strlen(key));
            }
        }
    }
    if (int_cb)
        uc->interrupt_callback = *int_cb;

    *puc = uc;
    return 0;
 fail:
    *puc = NULL;
#if CONFIG_NETWORK
    if (up->flags & URL_PROTOCOL_FLAG_NETWORK)
        ff_network_close();
#endif
    return err;
}
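The parsing block above implements FFmpeg's inline-option URL syntax: after "proto," the next character becomes the separator, followed by separator-delimited key/value pairs and the remainder of the URL (e.g. subfile,,start,32815239,end,0,,:video.ts). A reduced sketch of the same scan on a plain string:

#include <stdio.h>
#include <string.h>

/* Walk "<sep>key<sep>value<sep>key<sep>value<sep>..." starting at the
 * character after the comma, printing each pair; mirrors the loop above,
 * which calls av_opt_set() where this prints. */
static void parse_inline_opts(const char *after_comma)
{
    char buf[256];
    strncpy(buf, after_comma, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    char sep = buf[0];
    char *p = buf + 1;
    char *key, *val;
    while ((key = strchr(p, sep)) && p < key && (val = strchr(key + 1, sep))) {
        *key = *val = '\0';
        printf("option '%s' = '%s'\n", p, key + 1);
        p = val + 1;
    }
}

/* parse_inline_opts(",start,32815239,end,0,,:video.ts") prints
 * option 'start' = '32815239' and option 'end' = '0'. */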
Exemplo n.º 16
0
bool CDVDVideoCodecFFmpeg::Open(CDVDStreamInfo &hints, CDVDCodecOptions &options)
{
  m_hints = hints;
  m_options = options;

  AVCodec* pCodec;

  m_iOrientation = hints.orientation;

  m_formats.clear();
  m_formats = m_processInfo.GetPixFormats();
  m_formats.push_back(AV_PIX_FMT_NONE); /* always add none to get a terminated list in ffmpeg world */
  m_processInfo.SetSwDeinterlacingMethods();

  pCodec = avcodec_find_decoder(hints.codec);

  if(pCodec == NULL)
  {
    CLog::Log(LOGDEBUG,"CDVDVideoCodecFFmpeg::Open() Unable to find codec %d", hints.codec);
    return false;
  }

  CLog::Log(LOGNOTICE,"CDVDVideoCodecFFmpeg::Open() Using codec: %s",pCodec->long_name ? pCodec->long_name : pCodec->name);

  m_pCodecContext = avcodec_alloc_context3(pCodec);
  if (!m_pCodecContext)
    return false;

  m_pCodecContext->opaque = static_cast<ICallbackHWAccel*>(this);
  m_pCodecContext->debug_mv = 0;
  m_pCodecContext->debug = 0;
  m_pCodecContext->workaround_bugs = FF_BUG_AUTODETECT;
  m_pCodecContext->get_format = GetFormat;
  m_pCodecContext->codec_tag = hints.codec_tag;

  // setup threading model
  if (!(hints.codecOptions & CODEC_FORCE_SOFTWARE))
  {
    if (m_decoderState == STATE_NONE)
    {
      m_decoderState = STATE_HW_SINGLE;
    }
    else
    {
      int num_threads = g_cpuInfo.getCPUCount() * 3 / 2;
      num_threads = std::max(1, std::min(num_threads, 16));
      m_pCodecContext->thread_count = num_threads;
      m_pCodecContext->thread_safe_callbacks = 1;
      m_decoderState = STATE_SW_MULTI;
      CLog::Log(LOGDEBUG, "CDVDVideoCodecFFmpeg - open frame threaded with %d threads", num_threads);
    }
  }
  else
    m_decoderState = STATE_SW_SINGLE;

  // if we don't do this, then some codecs seem to fail.
  m_pCodecContext->coded_height = hints.height;
  m_pCodecContext->coded_width = hints.width;
  m_pCodecContext->bits_per_coded_sample = hints.bitsperpixel;

  if( hints.extradata && hints.extrasize > 0 )
  {
    m_pCodecContext->extradata_size = hints.extrasize;
    m_pCodecContext->extradata = (uint8_t*)av_mallocz(hints.extrasize + AV_INPUT_BUFFER_PADDING_SIZE);
    memcpy(m_pCodecContext->extradata, hints.extradata, hints.extrasize);
  }

  // advanced setting override for skip loop filter (see avcodec.h for valid options)
  //! @todo allow per video setting?
  if (g_advancedSettings.m_iSkipLoopFilter != 0)
  {
    m_pCodecContext->skip_loop_filter = (AVDiscard)g_advancedSettings.m_iSkipLoopFilter;
  }

  // set any special options
  for(std::vector<CDVDCodecOption>::iterator it = options.m_keys.begin(); it != options.m_keys.end(); ++it)
  {
    av_opt_set(m_pCodecContext, it->m_name.c_str(), it->m_value.c_str(), 0);
  }

  if (avcodec_open2(m_pCodecContext, pCodec, nullptr) < 0)
  {
    CLog::Log(LOGDEBUG,"CDVDVideoCodecFFmpeg::Open() Unable to open codec");
    avcodec_free_context(&m_pCodecContext);
    return false;
  }

  m_pFrame = av_frame_alloc();
  if (!m_pFrame)
  {
    avcodec_free_context(&m_pCodecContext);
    return false;
  }

  m_pDecodedFrame = av_frame_alloc();
  if (!m_pDecodedFrame)
  {
    av_frame_free(&m_pFrame);
    avcodec_free_context(&m_pCodecContext);
    return false;
  }

  m_pFilterFrame = av_frame_alloc();
  if (!m_pFilterFrame)
  {
    av_frame_free(&m_pFrame);
    av_frame_free(&m_pDecodedFrame);
    avcodec_free_context(&m_pCodecContext);
    return false;
  }

  UpdateName();
  const char* pixFmtName = av_get_pix_fmt_name(m_pCodecContext->pix_fmt);
  m_processInfo.SetVideoDimensions(m_pCodecContext->coded_width, m_pCodecContext->coded_height);
  m_processInfo.SetVideoPixelFormat(pixFmtName ? pixFmtName : "");

  m_dropCtrl.Reset(true);
  m_eof = false;
  return true;
}
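One detail worth calling out in the extradata copy above: libavcodec requires extradata buffers to be over-allocated by AV_INPUT_BUFFER_PADDING_SIZE with the padding zeroed, which av_mallocz provides. The same copy as a small checked helper (a sketch, not the Kodi code):

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

/* Copy codec extradata into ctx with the zeroed padding lavc requires.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static int set_extradata(AVCodecContext *ctx, const uint8_t *data, int size)
{
    ctx->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!ctx->extradata)
        return AVERROR(ENOMEM);
    memcpy(ctx->extradata, data, size);
    ctx->extradata_size = size;
    return 0;
}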
Exemplo n.º 17
0
int FFMPEG_Wrapper::init(int input_width,
                          int input_height,
                          const ServerConfiguration& config)
{
  boost::mutex::scoped_lock lock(frame_mutex_);

  time_started_ = boost::posix_time::microsec_clock::local_time();

  config_  = config;

  input_width_ = input_width;
  input_height_ = input_height;

  output_width_ = config.frame_width_;
  output_height_ = config.frame_height_;

  if (output_width_<0)
    output_width_ = input_width_;

  if (output_height_<0)
    output_height_ = input_height_;

  av_lockmgr_register(&ff_lockmgr);

  /* register all the codecs */
  avcodec_register_all();
  av_register_all();

  // lookup webm codec
  avformat_alloc_output_context2(&ffmpeg_format_context_, NULL, config_.codec_.c_str(), NULL);
  if (!ffmpeg_format_context_) {
    return -1;
  }

  ffmpeg_output_format_ = ffmpeg_format_context_->oformat;

  /* Add the audio and video streams using the default format codecs
   * and initialize the codecs. */
  ffmpeg_video_st_ = NULL;
  if (ffmpeg_output_format_->video_codec != AV_CODEC_ID_NONE)
  {

    /* find the video encoder */
    ffmpeg_codec_ = avcodec_find_encoder(ffmpeg_output_format_->video_codec);
    if (!(ffmpeg_codec_))
    {
      fprintf(stderr, "Codec not found (%s)\n",config_.codec_.c_str());
      return -1;
    }

    ffmpeg_video_st_ = avformat_new_stream(ffmpeg_format_context_, ffmpeg_codec_);
    if (!ffmpeg_video_st_)
    {
      fprintf(stderr, "Could not alloc stream\n");
      return -1;
    }

    ffmpeg_codec_context_ = ffmpeg_video_st_->codec;



    avcodec_get_context_defaults3(ffmpeg_codec_context_, ffmpeg_codec_);

    //////////////////////////////////////////////
    // ffmpeg codec configuration
    //////////////////////////////////////////////

    ffmpeg_codec_context_->codec_id = ffmpeg_output_format_->video_codec;
    ffmpeg_codec_context_->bit_rate = config_.bitrate_;

    ffmpeg_codec_context_->width = output_width_;
    ffmpeg_codec_context_->height = output_height_;
    ffmpeg_codec_context_->delay = 0;

    ffmpeg_codec_context_->time_base.den = config_.framerate_+3; // framerate increased slightly to compensate for playback delay
    ffmpeg_codec_context_->time_base.num = 1;
    ffmpeg_codec_context_->gop_size = config_.gop_; /* emit one intra frame every gop_ frames at most */
    ffmpeg_codec_context_->pix_fmt = PIX_FMT_YUV420P;
    ffmpeg_codec_context_->max_b_frames = 0;

    av_opt_set(ffmpeg_codec_context_->priv_data, "quality", config_.profile_.c_str(), 0);

    av_opt_set(ffmpeg_codec_context_->priv_data, "deadline", "1", 0);
    av_opt_set(ffmpeg_codec_context_->priv_data, "auto-alt-ref", "0", 0);

    // lag in frames
    av_opt_set(ffmpeg_codec_context_->priv_data, "lag-in-frames", "1", 0);
    av_opt_set(ffmpeg_codec_context_->priv_data, "rc_lookahead", "1", 0);

    av_opt_set(ffmpeg_codec_context_->priv_data, "drop_frame", "1", 0);

    // enable error-resilient coding
    av_opt_set(ffmpeg_codec_context_->priv_data, "error-resilient", "1", 0);

    // rate controller buffer size (duration: rc_buffer_size / bitrate * 1000 ms)
    int bufsize = 10; //ffmpeg_codec_context_->bit_rate/10;
    ffmpeg_codec_context_->rc_buffer_size = bufsize;
    // prebuffering at decoder
    ffmpeg_codec_context_->rc_initial_buffer_occupancy = bufsize; //bitrate/3;

    av_opt_set_int(ffmpeg_codec_context_->priv_data, "bufsize", bufsize, 0);
    av_opt_set_int(ffmpeg_codec_context_->priv_data, "buf-initial", bufsize, 0);
    av_opt_set_int(ffmpeg_codec_context_->priv_data, "buf-optimal", bufsize, 0);

    // buffer aggressiveness
    ffmpeg_codec_context_->rc_buffer_aggressivity = 0.5;

    // Quality settings
    //ffmpeg_codec_context_->qmin = 50;
    //ffmpeg_codec_context_->qmax = 62;
    if (config_.quality_>0)
      ffmpeg_codec_context_->qmin = config_.quality_;

     //ffmpeg_codec_context_->frame_skip_threshold = 100;

    /* Some formats want stream headers to be separate. */
    if (ffmpeg_format_context_->oformat->flags & AVFMT_GLOBALHEADER)
      ffmpeg_codec_context_->flags |= CODEC_FLAG_GLOBAL_HEADER;
  }

  if (ffmpeg_video_st_)
  {
    int ret;

    /* open the codec */
      {
         boost::mutex::scoped_lock lock(codec_mutex_);
         if (avcodec_open2(ffmpeg_codec_context_, ffmpeg_codec_, NULL) < 0) {
             fprintf(stderr, "Could not open video codec\n");
             return -1;
         }
      }  

      /* allocate and init a re-usable ffmpeg_frame_ */
      ffmpeg_frame_ = avcodec_alloc_frame();
      if (!ffmpeg_frame_) {
          fprintf(stderr, "Could not allocate video ffmpeg_frame_\n");
          return -1;
      }

      /* Allocate the encoded raw picture. */
      ret = avpicture_alloc(ffmpeg_dst_picture_, ffmpeg_codec_context_->pix_fmt, output_width_, output_height_);
      if (ret < 0) {
          fprintf(stderr, "Could not allocate picture\n");
          return -1;
      }

      /* Allocate a temporary RGB24 picture for the raw input frames; it is
       * converted to the encoder's output pixel format before encoding. */
      ret = avpicture_alloc(ffmpeg_src_picture_, AV_PIX_FMT_RGB24, input_width_, input_height_);
      if (ret < 0) {
          fprintf(stderr, "Could not allocate temporary picture\n");
          return -1;
      }

      /* copy data and linesize picture pointers to ffmpeg_frame_ */
      *((AVPicture *)ffmpeg_frame_) = *ffmpeg_dst_picture_;

      av_dump_format(ffmpeg_format_context_, 0, "", 1);

      ffmpeg_output_format_->flags |= AVFMT_NOFILE;

      if (ffmpeg_frame_)
          ffmpeg_frame_->pts = 0;
  }

  init_ = true;

  return 0;
}
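The rate-control comment above defines the buffer window as rc_buffer_size / bit_rate * 1000 milliseconds. With bufsize hard-coded to 10 bits the window is effectively zero, forcing very tight per-frame rate control; the commented-out bit_rate/10 would give a 100 ms window instead. The arithmetic, worked out for an assumed 500 kbps target:

#include <stdio.h>

/* Buffer duration in ms, per the comment above (buffer size is in bits). */
static double rc_buffer_ms(int rc_buffer_size, int bit_rate)
{
    return (double)rc_buffer_size / (double)bit_rate * 1000.0;
}

int main(void)
{
    printf("%.4f ms\n", rc_buffer_ms(10, 500000));           /* 0.0200 ms */
    printf("%.1f ms\n",  rc_buffer_ms(500000 / 10, 500000)); /* 100.0 ms  */
    return 0;
}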
Exemplo n.º 18
0
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) {
	AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec);
	AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec);
	if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) {
		return false;
	}

	encoder->currentAudioSample = 0;
	encoder->currentAudioFrame = 0;
	encoder->currentVideoFrame = 0;
	encoder->nextAudioPts = 0;

	AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0);
#ifndef USE_LIBAV
	avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile);
#else
	encoder->context = avformat_alloc_context();
	strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename) - 1);
	encoder->context->filename[sizeof(encoder->context->filename) - 1] = '\0';
	encoder->context->oformat = oformat;
#endif

	if (acodec) {
#ifdef FFMPEG_USE_CODECPAR
		encoder->audioStream = avformat_new_stream(encoder->context, NULL);
		encoder->audio = avcodec_alloc_context3(acodec);
#else
		encoder->audioStream = avformat_new_stream(encoder->context, acodec);
		encoder->audio = encoder->audioStream->codec;
#endif
		encoder->audio->bit_rate = encoder->audioBitrate;
		encoder->audio->channels = 2;
		encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO;
		encoder->audio->sample_rate = encoder->sampleRate;
		encoder->audio->sample_fmt = encoder->sampleFormat;
		AVDictionary* opts = 0;
		av_dict_set(&opts, "strict", "-2", 0);
		if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
			encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER;
		}
		avcodec_open2(encoder->audio, acodec, &opts);
		av_dict_free(&opts);
#if LIBAVCODEC_VERSION_MAJOR >= 55
		encoder->audioFrame = av_frame_alloc();
#else
		encoder->audioFrame = avcodec_alloc_frame();
#endif
		if (!encoder->audio->frame_size) {
			encoder->audio->frame_size = 1;
		}
		encoder->audioFrame->nb_samples = encoder->audio->frame_size;
		encoder->audioFrame->format = encoder->audio->sample_fmt;
		encoder->audioFrame->pts = 0;
		encoder->resampleContext = avresample_alloc_context();
		av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0);
		av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
		av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0);
		avresample_open(encoder->resampleContext);
		encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4;
		encoder->audioBuffer = av_malloc(encoder->audioBufferSize);
		encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0);
		encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize);
		avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0);

		if (encoder->audio->codec->id == AV_CODEC_ID_AAC &&
		    (!strcasecmp(encoder->containerFormat, "mp4") ||
		        !strcasecmp(encoder->containerFormat, "m4v") ||
		        !strcasecmp(encoder->containerFormat, "mov"))) {
			// MP4 container doesn't support the raw ADTS AAC format that the encoder spits out
#ifdef FFMPEG_USE_NEW_BSF
			av_bsf_alloc(av_bsf_get_by_name("aac_adtstoasc"), &encoder->absf);
			avcodec_parameters_from_context(encoder->absf->par_in, encoder->audio);
			av_bsf_init(encoder->absf);
#else
			encoder->absf = av_bitstream_filter_init("aac_adtstoasc");
#endif
		}
#ifdef FFMPEG_USE_CODECPAR
		avcodec_parameters_from_context(encoder->audioStream->codecpar, encoder->audio);
#endif
	}

#ifdef FFMPEG_USE_CODECPAR
	encoder->videoStream = avformat_new_stream(encoder->context, NULL);
	encoder->video = avcodec_alloc_context3(vcodec);
#else
	encoder->videoStream = avformat_new_stream(encoder->context, vcodec);
	encoder->video = encoder->videoStream->codec;
#endif
	encoder->video->bit_rate = encoder->videoBitrate;
	encoder->video->width = encoder->width;
	encoder->video->height = encoder->height;
	encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY };
	encoder->video->pix_fmt = encoder->pixFormat;
	encoder->video->gop_size = 60;
	encoder->video->max_b_frames = 3;
	if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
		encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	if (strcmp(vcodec->name, "libx264") == 0) {
		// Try to adaptively figure out when you can use a slower encoder
		if (encoder->width * encoder->height > 1000000) {
			av_opt_set(encoder->video->priv_data, "preset", "superfast", 0);
		} else if (encoder->width * encoder->height > 500000) {
			av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0);
		} else {
			av_opt_set(encoder->video->priv_data, "preset", "faster", 0);
		}
		av_opt_set(encoder->video->priv_data, "tune", "zerolatency", 0);
	}
	avcodec_open2(encoder->video, vcodec, 0);
#if LIBAVCODEC_VERSION_MAJOR >= 55
	encoder->videoFrame = av_frame_alloc();
#else
	encoder->videoFrame = avcodec_alloc_frame();
#endif
	encoder->videoFrame->format = encoder->video->pix_fmt;
	encoder->videoFrame->width = encoder->video->width;
	encoder->videoFrame->height = encoder->video->height;
	encoder->videoFrame->pts = 0;
	_ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight);
	av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32);
#ifdef FFMPEG_USE_CODECPAR
	avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video);
#endif

	avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE);
	return avformat_write_header(encoder->context, 0) >= 0;
}
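The libx264 branch above picks a preset adaptively from the frame area, trading compression efficiency for speed on larger frames. The same thresholds as a standalone helper, for clarity:

/* Pick an x264 preset by frame area, mirroring the cutoffs above:
 * > 1.0 Mpx -> superfast, > 0.5 Mpx -> veryfast, otherwise faster. */
static const char *preset_for_size(int width, int height)
{
    long area = (long)width * height;
    if (area > 1000000)
        return "superfast";
    if (area > 500000)
        return "veryfast";
    return "faster";
}

/* e.g. preset_for_size(1920, 1080) == "superfast" (2,073,600 px),
 *      preset_for_size(640, 480)   == "faster"    (307,200 px).   */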
Exemplo n.º 19
0
static int open_input(HLSContext *c, struct variant *var)
{
    AVDictionary *opts = NULL;
    int ret;
    struct segment *seg = var->segments[var->cur_seq_no - var->start_seq_no];

    // broker prior HTTP options that should be consistent across requests
    av_dict_set(&opts, "user-agent", c->user_agent, 0);
    av_dict_set(&opts, "cookies", c->cookies, 0);
    av_dict_set(&opts, "seekable", "0", 0);

    if (seg->key_type == KEY_NONE) {
        ret = ffurl_open(&var->input, seg->url, AVIO_FLAG_READ,
                          &var->parent->interrupt_callback, &opts);
        goto cleanup;
    } else if (seg->key_type == KEY_AES_128) {
        char iv[33], key[33], url[MAX_URL_SIZE];
        if (strcmp(seg->key, var->key_url)) {
            URLContext *uc;
            if (ffurl_open(&uc, seg->key, AVIO_FLAG_READ,
                           &var->parent->interrupt_callback, &opts) == 0) {
                if (ffurl_read_complete(uc, var->key, sizeof(var->key))
                    != sizeof(var->key)) {
                    av_log(NULL, AV_LOG_ERROR, "Unable to read key file %s\n",
                           seg->key);
                }
                ffurl_close(uc);
            } else {
                av_log(NULL, AV_LOG_ERROR, "Unable to open key file %s\n",
                       seg->key);
            }
            av_strlcpy(var->key_url, seg->key, sizeof(var->key_url));
        }
        ff_data_to_hex(iv, seg->iv, sizeof(seg->iv), 0);
        ff_data_to_hex(key, var->key, sizeof(var->key), 0);
        iv[32] = key[32] = '\0';
        if (strstr(seg->url, "://"))
            snprintf(url, sizeof(url), "crypto+%s", seg->url);
        else
            snprintf(url, sizeof(url), "crypto:%s", seg->url);
        if ((ret = ffurl_alloc(&var->input, url, AVIO_FLAG_READ,
                               &var->parent->interrupt_callback)) < 0)
            goto cleanup;
        av_opt_set(var->input->priv_data, "key", key, 0);
        av_opt_set(var->input->priv_data, "iv", iv, 0);
        /* Need to repopulate options */
        av_dict_free(&opts);
        av_dict_set(&opts, "seekable", "0", 0);
        if ((ret = ffurl_connect(var->input, &opts)) < 0) {
            ffurl_close(var->input);
            var->input = NULL;
            goto cleanup;
        }
        ret = 0;
    }
    else
      ret = AVERROR(ENOSYS);

cleanup:
    av_dict_free(&opts);
    return ret;
}
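For AES-128 segments, the function wraps the segment URL in FFmpeg's crypto protocol ("crypto+" when the inner URL already carries a scheme, plain "crypto:" otherwise) and passes the key and IV as 32-digit hex strings. A sketch of that assembly, with a generic hex encoder standing in for the internal ff_data_to_hex():

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Lowercase-hex encode len bytes into dst (dst needs 2*len+1 chars). */
static void data_to_hex(char *dst, const uint8_t *src, size_t len)
{
    static const char digits[] = "0123456789abcdef";
    for (size_t i = 0; i < len; i++) {
        dst[2 * i]     = digits[src[i] >> 4];
        dst[2 * i + 1] = digits[src[i] & 0x0f];
    }
    dst[2 * len] = '\0';
}

/* Wrap a segment URL for the crypto protocol, as in the code above. */
static void make_crypto_url(char *out, size_t cap, const char *seg_url)
{
    if (strstr(seg_url, "://"))
        snprintf(out, cap, "crypto+%s", seg_url);   /* crypto+http://...   */
    else
        snprintf(out, cap, "crypto:%s", seg_url);   /* relative/local path */
}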
Exemplo n.º 20
0
static int ffserver_save_avoption(const char *opt, const char *arg, int type,
                                  FFServerConfig *config)
{
    static int hinted = 0;
    int ret = 0;
    AVDictionaryEntry *e;
    const AVOption *o = NULL;
    const char *option = NULL;
    const char *codec_name = NULL;
    char buff[1024];
    AVCodecContext *ctx;
    AVDictionary **dict;
    enum AVCodecID guessed_codec_id;

    switch (type) {
    case AV_OPT_FLAG_VIDEO_PARAM:
        ctx = config->dummy_vctx;
        dict = &config->video_opts;
        guessed_codec_id = config->guessed_video_codec_id != AV_CODEC_ID_NONE ?
                           config->guessed_video_codec_id : AV_CODEC_ID_H264;
        break;
    case AV_OPT_FLAG_AUDIO_PARAM:
        ctx = config->dummy_actx;
        dict = &config->audio_opts;
        guessed_codec_id = config->guessed_audio_codec_id != AV_CODEC_ID_NONE ?
                           config->guessed_audio_codec_id : AV_CODEC_ID_AAC;
        break;
    default:
        av_assert0(0);
    }

    if (strchr(opt, ':')) {
        //explicit private option
        snprintf(buff, sizeof(buff), "%s", opt);
        codec_name = buff;
        if(!(option = strchr(buff, ':'))) {
            report_config_error(config->filename, config->line_num,
                                AV_LOG_ERROR, &config->errors,
                                "Syntax error. Unmatched ':'\n");
            return -1;

        }
        buff[option - buff] = '\0';
        option++;
        if ((ret = ffserver_set_codec(ctx, codec_name, config)) < 0)
            return ret;
        if (!ctx->codec || !ctx->priv_data)
            return -1;
    } else {
        option = opt;
    }

    o = av_opt_find(ctx, option, NULL, type | AV_OPT_FLAG_ENCODING_PARAM,
                    AV_OPT_SEARCH_CHILDREN);
    if (!o &&
            (!strcmp(option, "time_base")  || !strcmp(option, "pixel_format") ||
             !strcmp(option, "video_size") || !strcmp(option, "codec_tag")))
        o = av_opt_find(ctx, option, NULL, 0, 0);
    if (!o) {
        report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
                            &config->errors, "Option not found: '%s'\n", opt);
        if (!hinted && ctx->codec_id == AV_CODEC_ID_NONE) {
            hinted = 1;
            report_config_error(config->filename, config->line_num,
                                AV_LOG_ERROR, NULL, "If '%s' is a codec private "
                                "option, then prefix it with codec name, for "
                                "example '%s:%s %s' or define codec earlier.\n",
                                opt, avcodec_get_name(guessed_codec_id), opt,
                                arg);
        }
    } else if ((ret = av_opt_set(ctx, option, arg, AV_OPT_SEARCH_CHILDREN)) < 0) {
        report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
                            &config->errors, "Invalid value for option %s (%s): %s\n", opt,
                            arg, av_err2str(ret));
    } else if ((e = av_dict_get(*dict, option, NULL, 0))) {
        if ((o->type == AV_OPT_TYPE_FLAGS) && arg &&
                (arg[0] == '+' || arg[0] == '-'))
            return av_dict_set(dict, option, arg, AV_DICT_APPEND);
        report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
                            &config->errors, "Redeclaring value of option '%s'. "
                            "Previous value was: '%s'.\n", opt, e->value);
    } else if (av_dict_set(dict, option, arg, 0) < 0) {
        return AVERROR(ENOMEM);
    }
    return 0;
}
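The prefix handling above copies the option into a scratch buffer and splits it at the first ':' so the left half can be bound as a codec name. The split in isolation, as a sketch:

#include <string.h>

/* Split "codec:option" in place; stores the codec name (or NULL if there
 * is no prefix) and returns the option part, as the config parser does. */
static const char *split_codec_option(char *buff, const char **codec_name)
{
    char *colon = strchr(buff, ':');
    if (!colon) {
        *codec_name = NULL;
        return buff;
    }
    *colon = '\0';
    *codec_name = buff;
    return colon + 1;
}

/* char b[] = "libx264:preset"; split_codec_option(b, &name)
 * yields name == "libx264" and option == "preset". */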
Exemplo n.º 21
0
/*
 * Video encoding example
 */
static void video_encode_example(const char *filename, int codec_id)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, out_size, size, x, y, outbuf_size;
    FILE *f;
    AVFrame *picture;
    uint8_t *outbuf;

    printf("Video encoding\n");

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    picture= avcodec_alloc_frame();

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base= (AVRational){1,25};
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames=1;
    c->pix_fmt = PIX_FMT_YUV420P;

    if(codec_id == CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    /* alloc image and output buffer */
    outbuf_size = 100000;
    outbuf = malloc(outbuf_size);

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    av_image_alloc(picture->data, picture->linesize,
                   c->width, c->height, c->pix_fmt, 1);

    /* encode 1 second of video */
    for(i=0;i<25;i++) {
        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for(y=0;y<c->height;y++) {
            for(x=0;x<c->width;x++) {
                picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for(y=0;y<c->height/2;y++) {
            for(x=0;x<c->width/2;x++) {
                picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
                picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        /* encode the image */
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
        printf("encoding frame %3d (size=%5d)\n", i, out_size);
        fwrite(outbuf, 1, out_size, f);
    }

    /* get the delayed frames */
    for(; out_size; i++) {
        fflush(stdout);

        out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
        printf("write frame %3d (size=%5d)\n", i, out_size);
        fwrite(outbuf, 1, out_size, f);
    }

    /* add sequence end code to have a real mpeg file */
    outbuf[0] = 0x00;
    outbuf[1] = 0x00;
    outbuf[2] = 0x01;
    outbuf[3] = 0xb7;
    fwrite(outbuf, 1, 4, f);
    fclose(f);
    free(outbuf);

    avcodec_close(c);
    av_free(c);
    av_free(picture->data[0]);
    av_free(picture);
    printf("\n");
}
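This example is written against the long-removed avcodec_encode_video()/output-buffer API (note also avcodec_open() and avcodec_alloc_frame()). On current FFmpeg the same loop goes through avcodec_send_frame()/avcodec_receive_packet(); a minimal sketch of the replacement, with error handling trimmed:

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Encode one frame (or flush with frame == NULL) and write every packet
 * the encoder returns; modern replacement for avcodec_encode_video(). */
static int encode_frame(AVCodecContext *c, AVFrame *frame, FILE *f)
{
    AVPacket *pkt = av_packet_alloc();
    int ret = avcodec_send_frame(c, frame);  /* NULL enters flush mode */
    while (ret >= 0) {
        ret = avcodec_receive_packet(c, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            ret = 0;            /* need more input, or fully flushed */
            break;
        }
        if (ret < 0)
            break;
        fwrite(pkt->data, 1, pkt->size, f);
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);
    return ret;
}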
Exemplo n.º 22
0
int tdav_codec_h264_open_encoder(tdav_codec_h264_t* self)
{
#if HAVE_FFMPEG
	int ret;
	tsk_size_t size;

	if(self->encoder.context){
		TSK_DEBUG_ERROR("Encoder already opened");
		return -1;
	}
    
#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
    if((self->encoder.context = avcodec_alloc_context3(self->encoder.codec))){
        avcodec_get_context_defaults3(self->encoder.context, self->encoder.codec);
    }
#else
    if((self->encoder.context = avcodec_alloc_context())){
        avcodec_get_context_defaults(self->encoder.context);
    }
#endif
    
    if(!self->encoder.context){
        TSK_DEBUG_ERROR("Failed to allocate context");
		return -1;
    }

#if TDAV_UNDER_X86 && LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->dsp_mask = (FF_MM_MMX | FF_MM_MMXEXT | FF_MM_SSE);
#endif

	self->encoder.context->pix_fmt		= PIX_FMT_YUV420P;
	self->encoder.context->time_base.num  = 1;
	self->encoder.context->time_base.den  = TMEDIA_CODEC_VIDEO(self)->out.fps;
	self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.height : TMEDIA_CODEC_VIDEO(self)->out.width;
	self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270) ? TMEDIA_CODEC_VIDEO(self)->out.width : TMEDIA_CODEC_VIDEO(self)->out.height;
	self->encoder.max_bw_kpbs = TSK_CLAMP(
		0,
		tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps), 
		TMEDIA_CODEC(self)->bandwidth_max_upload
	);
	self->encoder.context->bit_rate = (self->encoder.max_bw_kpbs * 1024);// bps

	self->encoder.context->rc_min_rate = (self->encoder.context->bit_rate >> 3);
	self->encoder.context->rc_max_rate = self->encoder.context->bit_rate;

#if LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->rc_lookahead = 0;
#endif
	self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;
	
#if LIBAVCODEC_VERSION_MAJOR <= 53
    self->encoder.context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_B8X8;
#endif
    self->encoder.context->me_method = ME_UMH;
	self->encoder.context->me_range = 16;
	self->encoder.context->qmin = 10;
	self->encoder.context->qmax = 51;
#if LIBAVCODEC_VERSION_MAJOR <= 53
    self->encoder.context->mb_qmin = self->encoder.context->qmin;
	self->encoder.context->mb_qmax = self->encoder.context->qmax;
#endif
	/* METROPOLIS = G2J.COM TelePresence client. Check Issue 378: No video when calling "TANDBERG/4129 (X8.1.1)" */
#if !METROPOLIS  && 0
	self->encoder.context->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
    self->encoder.context->flags |= CODEC_FLAG_LOW_DELAY;
	if (self->encoder.context->profile == FF_PROFILE_H264_BASELINE) {
		self->encoder.context->max_b_frames = 0;
	}

	switch(TDAV_CODEC_H264_COMMON(self)->profile){
		case profile_idc_baseline:
		default:
			self->encoder.context->profile = FF_PROFILE_H264_BASELINE;
			self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
			break;
		case profile_idc_main:
			self->encoder.context->profile = FF_PROFILE_H264_MAIN;
			self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
			break;
	} 
	
	/* Comment from libavcodec/libx264.c:
     * Allow x264 to be instructed through AVCodecContext about the maximum
     * size of the RTP payload. For example, this enables the production of
     * payload suitable for the H.264 RTP packetization-mode 0 i.e. single
     * NAL unit per RTP packet.
     */
	self->encoder.context->rtp_payload_size = H264_RTP_PAYLOAD_SIZE;
	self->encoder.context->opaque = tsk_null;
	self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * TDAV_H264_GOP_SIZE_IN_SECONDS);
	
#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
    if((ret = av_opt_set_int(self->encoder.context->priv_data, "slice-max-size", H264_RTP_PAYLOAD_SIZE, 0))){
	    TSK_DEBUG_ERROR("Failed to set x264 slice-max-size to %d", H264_RTP_PAYLOAD_SIZE);
	}
    if((ret = av_opt_set(self->encoder.context->priv_data, "profile", (self->encoder.context->profile == FF_PROFILE_H264_BASELINE ? "baseline" : "main"), 0))){
	    TSK_DEBUG_ERROR("Failed to set x264 profile");
	}
    if((ret = av_opt_set(self->encoder.context->priv_data, "preset", "veryfast", 0))){
	    TSK_DEBUG_ERROR("Failed to set x264 preset to veryfast");
	}
    if((ret = av_opt_set_int(self->encoder.context->priv_data, "rc-lookahead", 0, 0)) && (ret = av_opt_set_int(self->encoder.context->priv_data, "rc_lookahead", 0, 0))){
        TSK_DEBUG_ERROR("Failed to set x264 rc_lookahead=0");
    }
	if((ret = av_opt_set(self->encoder.context->priv_data, "tune", "animation+zerolatency", 0))){
	    TSK_DEBUG_ERROR("Failed to set x264 tune to zerolatency");
	}
#endif

	// Picture (YUV 420)
	if(!(self->encoder.picture = avcodec_alloc_frame())){
		TSK_DEBUG_ERROR("Failed to create encoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->encoder.picture);
	

	size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
	if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
		TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
		return -2;
	}

	// Open encoder
	if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
		TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
		return ret;
	}
    
    self->encoder.frame_count = 0;

	TSK_DEBUG_INFO("[H.264] bitrate=%d bps", self->encoder.context->bit_rate);

	return ret;
#elif HAVE_H264_PASSTHROUGH
    self->encoder.frame_count = 0;
	return 0;
#endif

	TSK_DEBUG_ERROR("Not expected code called");
	return -1;
}
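tmedia_get_video_bandwidth_kbps_2() is not shown here; the surrounding code treats it as a resolution- and fps-based bitrate estimate that is clamped to the configured upload maximum and then converted to bps. A hedged sketch of that shape; the 0.07 bits-per-pixel factor is purely illustrative, not the doubango constant:

/* Rough bitrate estimate: pixels per second times a motion factor, clamped.
 * The factor below is an assumption for illustration; doubango has its own
 * tuning. */
static int estimate_video_kbps(int width, int height, int fps, int max_kbps)
{
    double bits_per_sec = (double)width * height * fps * 0.07;
    int kbps = (int)(bits_per_sec / 1024.0);
    if (kbps < 0)        kbps = 0;
    if (kbps > max_kbps) kbps = max_kbps;
    return kbps;   /* context->bit_rate = kbps * 1024, as above */
}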
Exemplo n.º 23
0
AVStream* AVFormatWriter::AddVideoStream(void)
{
    AVCodecContext *c;
    AVStream *st;
    AVCodec *codec;

    st = avformat_new_stream(m_ctx, NULL);
    if (!st)
    {
        LOG(VB_RECORD, LOG_ERR,
            LOC + "AddVideoStream(): avformat_new_stream() failed");
        return NULL;
    }
    st->id = 0;

    c = st->codec;

    codec = avcodec_find_encoder(m_ctx->oformat->video_codec);
    if (!codec)
    {
        LOG(VB_RECORD, LOG_ERR,
            LOC + "AddVideoStream(): avcodec_find_encoder() failed");
        return NULL;
    }

    avcodec_get_context_defaults3(c, codec);

    c->codec                      = codec;
    c->codec_id                   = m_ctx->oformat->video_codec;
    c->codec_type                 = AVMEDIA_TYPE_VIDEO;

    c->bit_rate                   = m_videoBitrate;
    c->width                      = m_width;
    c->height                     = m_height;

    // c->sample_aspect_ratio.num    = (int)floor(m_aspect * 10000);
    // c->sample_aspect_ratio.den    = 10000;

    c->time_base                  = GetCodecTimeBase();

    st->time_base.den             = 90000;
    st->time_base.num             = 1;
    st->r_frame_rate.num          = 0;
    st->r_frame_rate.den          = 0;

    c->gop_size                   = m_keyFrameDist;
    c->pix_fmt                    = PIX_FMT_YUV420P;
    c->thread_count               = m_encodingThreadCount;
    c->thread_type                = FF_THREAD_SLICE;

    if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        c->max_b_frames          = 2;
    }
    else if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO)
    {
        c->mb_decision           = 2;
    }
    else if (c->codec_id == AV_CODEC_ID_H264)
    {

        // Try to provide the widest software/device support by automatically using
        // the Baseline profile where the given bitrate and resolution permits

        if ((c->height > 720) || // Approximate highest resolution supported by Baseline 3.1
            (c->bit_rate > 1000000)) // 14,000 Kbps aka 14Mbps maximum permissible rate for Baseline 3.1
        {
            c->level = 40;
            av_opt_set(c->priv_data, "profile", "main", 0);
        }
        else if ((c->height > 576) || // Approximate highest resolution supported by Baseline 3.0
            (c->bit_rate > 1000000))  // 10,000 Kbps aka 10Mbps maximum permissible rate for Baseline 3.0
        {
            c->level = 31;
            av_opt_set(c->priv_data, "profile", "baseline", 0);
        }
        else
        {
            c->level = 30; // Baseline 3.0 is the most widely supported, but it's limited to SD
            av_opt_set(c->priv_data, "profile", "baseline", 0);
        }

        c->coder_type            = 0;
        c->max_b_frames          = 0;
        c->slices                = 8;

        c->flags                |= CODEC_FLAG_LOOP_FILTER;
        c->me_cmp               |= 1;
        c->me_method             = ME_HEX;
        c->me_subpel_quality     = 6;
        c->me_range              = 16;
        c->keyint_min            = 25;
        c->scenechange_threshold = 40;
        c->i_quant_factor        = 0.71;
        c->b_frame_strategy      = 1;
        c->qcompress             = 0.6;
        c->qmin                  = 10;
        c->qmax                  = 51;
        c->max_qdiff             = 4;
        c->refs                  = 3;
        c->trellis               = 0;

        av_opt_set(c, "partitions", "i8x8,i4x4,p8x8,b8x8", 0);
        av_opt_set_int(c, "direct-pred", 1, 0);
        av_opt_set_int(c, "rc-lookahead", 0, 0);
        av_opt_set_int(c, "fast-pskip", 1, 0);
        av_opt_set_int(c, "mixed-refs", 1, 0);
        av_opt_set_int(c, "8x8dct", 0, 0);
        av_opt_set_int(c, "weightb", 0, 0);

        av_opt_set(c->priv_data, "preset",
                   m_encodingPreset.toLatin1().constData(), 0);
        av_opt_set(c->priv_data, "tune",
                   m_encodingTune.toLatin1().constData(), 0);
    }

    if(m_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
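The H.264 branch above is a small decision table: raise the level and fall back to Main profile once the resolution or bitrate exceeds what Baseline 3.x allows. Note that both branches test the same 1,000,000 bps cutoff, so the bitrate half of the second test can never fire. The table as a helper, with thresholds copied from the code above rather than from the H.264 spec:

/* Choose an H.264 level/profile pair using the same cutoffs as above. */
struct h264_choice { int level; const char *profile; };

static struct h264_choice choose_h264(int height, int bit_rate)
{
    struct h264_choice c;
    if (height > 720 || bit_rate > 1000000) {
        c.level = 40; c.profile = "main";      /* beyond Baseline 3.1 */
    } else if (height > 576 || bit_rate > 1000000) {
        c.level = 31; c.profile = "baseline";  /* HD-capable Baseline */
    } else {
        c.level = 30; c.profile = "baseline";  /* widest SD support   */
    }
    return c;
}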
Exemplo n.º 24
0
int main(int argc, char* argv[])
{
	AVCodec *pCodec;
	AVCodecContext *pCodecCtx = NULL;
	int i, ret, got_output;
	FILE *fp_in;
	FILE *fp_out;
	AVFrame *pFrame;
	AVPacket pkt;
	int y_size;
	int framecnt = 0;

	char filename_in[] = "../testResource/ds_480x272.yuv";

#if TEST_HEVC  
	AVCodecID codec_id = AV_CODEC_ID_HEVC;
	char filename_out[] = "ds.hevc";
#else  
	AVCodecID codec_id = AV_CODEC_ID_H264;
	char filename_out[] = "ds.h264";
#endif  


	int in_w = 480, in_h = 272;
	int framenum = 100;

	avcodec_register_all();

	pCodec = avcodec_find_encoder(codec_id);
	if (!pCodec) {
		printf("Codec not found\n");
		return -1;
	}
	pCodecCtx = avcodec_alloc_context3(pCodec);
	if (!pCodecCtx) {
		printf("Could not allocate video codec context\n");
		return -1;
	}
	pCodecCtx->bit_rate = 400000;
	pCodecCtx->width = in_w;
	pCodecCtx->height = in_h;
	pCodecCtx->time_base.num = 1;
	pCodecCtx->time_base.den = 25;
	pCodecCtx->gop_size = 10;
	pCodecCtx->max_b_frames = 1;
	pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

	if (codec_id == AV_CODEC_ID_H264)
		av_opt_set(pCodecCtx->priv_data, "preset", "slow", 0);

	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		printf("Could not open codec\n");
		return -1;
	}

	pFrame = av_frame_alloc();
	if (!pFrame) {
		printf("Could not allocate video frame\n");
		return -1;
	}
	pFrame->format = pCodecCtx->pix_fmt;
	pFrame->width = pCodecCtx->width;
	pFrame->height = pCodecCtx->height;

	ret = av_image_alloc(pFrame->data, pFrame->linesize, pCodecCtx->width, pCodecCtx->height,
		pCodecCtx->pix_fmt, 16);
	if (ret < 0) {
		printf("Could not allocate raw picture buffer\n");
		return -1;
	}
	//Input raw data  
	fp_in = fopen(filename_in, "rb");
	if (!fp_in) {
		printf("Could not open %s\n", filename_in);
		return -1;
	}
	//Output bitstream  
	fp_out = fopen(filename_out, "wb");
	if (!fp_out) {
		printf("Could not open %s\n", filename_out);
		return -1;
	}

	y_size = pCodecCtx->width * pCodecCtx->height;
	//Encode  
	for (i = 0; i < framenum; i++) {
		av_init_packet(&pkt);
		pkt.data = NULL;    // packet data will be allocated by the encoder  
		pkt.size = 0;
		//Read raw YUV data  
		if (fread(pFrame->data[0], 1, y_size, fp_in) <= 0 ||       // Y  
			fread(pFrame->data[1], 1, y_size / 4, fp_in) <= 0 || // U  
			fread(pFrame->data[2], 1, y_size / 4, fp_in) <= 0) { // V  
			return -1;
		} else if (feof(fp_in)) {
			break;
		}

		pFrame->pts = i;
		/* encode the image */
		ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
		if (ret < 0) {
			printf("Error encoding frame\n");
			return -1;
		}
		if (got_output) {
			printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, pkt.size);
			framecnt++;
			fwrite(pkt.data, 1, pkt.size, fp_out);
			av_free_packet(&pkt);
		}
	}
	//Flush Encoder  
	for (got_output = 1; got_output; i++) {
		ret = avcodec_encode_video2(pCodecCtx, &pkt, NULL, &got_output);
		if (ret < 0) {
			printf("Error encoding frame\n");
			return -1;
		}
		if (got_output) {
			printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", pkt.size);
			fwrite(pkt.data, 1, pkt.size, fp_out);
			av_free_packet(&pkt);
		}
	}

	fclose(fp_out);
	avcodec_close(pCodecCtx);
	av_free(pCodecCtx);
	av_freep(&pFrame->data[0]);
	av_frame_free(&pFrame);

	return 0;
}
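The read loop assumes planar YUV 4:2:0 input: a full-resolution Y plane plus quarter-size U and V planes, i.e. 1.5 bytes per pixel. The arithmetic for the 480x272 clip used here:

#include <stdio.h>

int main(void)
{
    int w = 480, h = 272;
    int y_size  = w * h;                  /* 130560 bytes                  */
    int uv_size = y_size / 4;             /*  32640 bytes each for U and V */
    int frame   = y_size + 2 * uv_size;   /* 195840 bytes = w * h * 3 / 2  */
    printf("one YUV420p frame: %d bytes\n", frame);
    return 0;
}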
Exemplo n.º 25
0
static int mmsh_open_internal(URLContext *h, const char *uri, int flags, int timestamp, int64_t pos)
{
    int i, port, err;
    char httpname[256], path[256], host[128];
    char *stream_selection = NULL;
    char headers[1024];
    MMSHContext *mmsh = h->priv_data;
    MMSContext *mms;

    mmsh->request_seq = h->is_streamed = 1;
    mms = &mmsh->mms;
    av_strlcpy(mmsh->location, uri, sizeof(mmsh->location));

    av_url_split(NULL, 0, NULL, 0,
        host, sizeof(host), &port, path, sizeof(path), mmsh->location);
    if (port<0)
        port = 80; // default mmsh protocol port
    ff_url_join(httpname, sizeof(httpname), "http", NULL, host, port, "%s", path);

    if (ffurl_alloc(&mms->mms_hd, httpname, AVIO_FLAG_READ,
                    &h->interrupt_callback) < 0) {
        return AVERROR(EIO);
    }

    snprintf(headers, sizeof(headers),
             "Accept: */*\r\n"
             USERAGENT
             "Host: %s:%d\r\n"
             "Pragma: no-cache,rate=1.000000,stream-time=0,"
             "stream-offset=0:0,request-context=%u,max-duration=0\r\n"
             CLIENTGUID
             "Connection: Close\r\n",
             host, port, mmsh->request_seq++);
    av_opt_set(mms->mms_hd->priv_data, "headers", headers, 0);

    err = ffurl_connect(mms->mms_hd, NULL);
    if (err) {
        goto fail;
    }
    err = get_http_header_data(mmsh);
    if (err) {
        av_log(NULL, AV_LOG_ERROR, "Get http header data failed!\n");
        goto fail;
    }

    // close the socket and then reopen it for sending the second play request.
    ffurl_close(mms->mms_hd);
    memset(headers, 0, sizeof(headers));
    if ((err = ffurl_alloc(&mms->mms_hd, httpname, AVIO_FLAG_READ,
                           &h->interrupt_callback)) < 0) {
        goto fail;
    }
    stream_selection = av_mallocz(mms->stream_num * 19 + 1);
    if (!stream_selection)
        return AVERROR(ENOMEM);
    for (i = 0; i < mms->stream_num; i++) {
        char tmp[20];
        err = snprintf(tmp, sizeof(tmp), "ffff:%d:0 ", mms->streams[i].id);
        if (err < 0)
            goto fail;
        av_strlcat(stream_selection, tmp, mms->stream_num * 19 + 1);
    }
    // send play request
    err = snprintf(headers, sizeof(headers),
                   "Accept: */*\r\n"
                   USERAGENT
                   "Host: %s:%d\r\n"
                   "Pragma: no-cache,rate=1.000000,request-context=%u\r\n"
                   "Pragma: xPlayStrm=1\r\n"
                   CLIENTGUID
                   "Pragma: stream-switch-count=%d\r\n"
                   "Pragma: stream-switch-entry=%s\r\n"
                   "Pragma: no-cache,rate=1.000000,stream-time=%u"
                   "Connection: Close\r\n",
                   host, port, mmsh->request_seq++, mms->stream_num, stream_selection, timestamp);
    av_freep(&stream_selection);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Build play request failed!\n");
        goto fail;
    }
    av_dlog(NULL, "out_buffer is %s", headers);
    av_opt_set(mms->mms_hd->priv_data, "headers", headers, 0);

    err = ffurl_connect(mms->mms_hd, NULL);
    if (err) {
          goto fail;
    }

    err = get_http_header_data(mmsh);
    if (err) {
        av_log(NULL, AV_LOG_ERROR, "Get http header data failed!\n");
        goto fail;
    }

    av_dlog(NULL, "Connection successfully open\n");
    return 0;
fail:
    av_freep(&stream_selection);
    av_dlog(NULL, "Connection failed with error %d\n", err);
    return err;
}
Exemplo n.º 26
0
bool CDVDVideoCodecFFmpeg::Open(CDVDStreamInfo &hints, CDVDCodecOptions &options)
{
  m_hints = hints;
  m_options = options;

  AVCodec* pCodec;

  m_iOrientation = hints.orientation;

  for(std::vector<ERenderFormat>::iterator it = options.m_formats.begin(); it != options.m_formats.end(); ++it)
  {
    m_formats.push_back((AVPixelFormat)CDVDCodecUtils::PixfmtFromEFormat(*it));
    if(*it == RENDER_FMT_YUV420P)
      m_formats.push_back(AV_PIX_FMT_YUVJ420P);
  }
  m_formats.push_back(AV_PIX_FMT_NONE); /* always add none to get a terminated list in ffmpeg world */

  pCodec = avcodec_find_decoder(hints.codec);

  if(pCodec == NULL)
  {
    CLog::Log(LOGDEBUG,"CDVDVideoCodecFFmpeg::Open() Unable to find codec %d", hints.codec);
    return false;
  }

  CLog::Log(LOGNOTICE,"CDVDVideoCodecFFmpeg::Open() Using codec: %s",pCodec->long_name ? pCodec->long_name : pCodec->name);

  m_pCodecContext = avcodec_alloc_context3(pCodec);
  m_pCodecContext->opaque = (void*)this;
  m_pCodecContext->debug_mv = 0;
  m_pCodecContext->debug = 0;
  m_pCodecContext->workaround_bugs = FF_BUG_AUTODETECT;
  m_pCodecContext->get_format = GetFormat;
  m_pCodecContext->codec_tag = hints.codec_tag;

  // setup threading model
  if (!hints.software)
  {
    bool tryhw = false;
#ifdef HAVE_LIBVDPAU
    if(CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_USEVDPAU))
      tryhw = true;
#endif
#ifdef HAVE_LIBVA
    if(CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_USEVAAPI))
      tryhw = true;
#endif
#ifdef HAS_DX
    if(CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_USEDXVA2))
      tryhw = true;
#endif
#ifdef TARGET_DARWIN_OSX
    if(CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_USEVDA))
      tryhw = true;
#endif
    if (tryhw && m_decoderState == STATE_NONE)
    {
      m_decoderState = STATE_HW_SINGLE;
    }
    else
    {
      int num_threads = std::min(8 /*MAX_THREADS*/, g_cpuInfo.getCPUCount());
      if( num_threads > 1)
        m_pCodecContext->thread_count = num_threads;
      m_pCodecContext->thread_safe_callbacks = 1;
      m_decoderState = STATE_SW_MULTI;
      CLog::Log(LOGDEBUG, "CDVDVideoCodecFFmpeg - open frame threaded with %d threads", num_threads);
    }
  }
  else
    m_decoderState = STATE_SW_SINGLE;

#if defined(TARGET_DARWIN_IOS)
  // ffmpeg with enabled neon will crash and burn if this is enabled
  m_pCodecContext->flags &= ~CODEC_FLAG_EMU_EDGE;
#else
  if (pCodec->id != AV_CODEC_ID_H264 && pCodec->capabilities & CODEC_CAP_DR1
      && pCodec->id != AV_CODEC_ID_VP8
     )
    m_pCodecContext->flags |= CODEC_FLAG_EMU_EDGE;
#endif

  // if we don't do this, then some codecs seem to fail.
  m_pCodecContext->coded_height = hints.height;
  m_pCodecContext->coded_width = hints.width;
  m_pCodecContext->bits_per_coded_sample = hints.bitsperpixel;

  if( hints.extradata && hints.extrasize > 0 )
  {
    m_pCodecContext->extradata_size = hints.extrasize;
    m_pCodecContext->extradata = (uint8_t*)av_mallocz(hints.extrasize + FF_INPUT_BUFFER_PADDING_SIZE);
    memcpy(m_pCodecContext->extradata, hints.extradata, hints.extrasize);
  }

  // advanced setting override for skip loop filter (see avcodec.h for valid options)
  // TODO: allow per video setting?
  if (g_advancedSettings.m_iSkipLoopFilter != 0)
  {
    m_pCodecContext->skip_loop_filter = (AVDiscard)g_advancedSettings.m_iSkipLoopFilter;
  }

  // set any special options
  for(std::vector<CDVDCodecOption>::iterator it = options.m_keys.begin(); it != options.m_keys.end(); ++it)
  {
    if (it->m_name == "surfaces")
      m_uSurfacesCount = atoi(it->m_value.c_str());
    else
      av_opt_set(m_pCodecContext, it->m_name.c_str(), it->m_value.c_str(), 0);
  }

  // If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() are reference-counted and are valid indefinitely.
  // Without this frames will get (deep) copied when deinterlace is set to automatic, but file is not deinterlaced.
  m_pCodecContext->refcounted_frames = 1;

  if (avcodec_open2(m_pCodecContext, pCodec, nullptr) < 0)
  {
    CLog::Log(LOGDEBUG,"CDVDVideoCodecFFmpeg::Open() Unable to open codec");
    return false;
  }

  m_pFrame = av_frame_alloc();
  if (!m_pFrame)
    return false;

  m_pDecodedFrame = av_frame_alloc();
  if (!m_pDecodedFrame)
    return false;

  m_pFilterFrame = av_frame_alloc();
  if (!m_pFilterFrame)
    return false;

  UpdateName();
  return true;
}
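The get_format callback assigned above is the hook through which libavcodec negotiates a pixel format against the m_formats list built earlier. A minimal sketch of such a callback; the real Kodi implementation also probes hardware accelerators, and stashing the supported list in ctx->opaque is this sketch's assumption, not Kodi's layout:

#include <libavcodec/avcodec.h>

/* Pick the first format the decoder offers that we also support.
 * 'supported' is an AV_PIX_FMT_NONE-terminated list, like m_formats. */
static enum AVPixelFormat get_format_cb(struct AVCodecContext *ctx,
                                        const enum AVPixelFormat *fmt)
{
    const enum AVPixelFormat *supported =
        (const enum AVPixelFormat *)ctx->opaque;  /* assumption: list stashed here */
    for (; *fmt != AV_PIX_FMT_NONE; fmt++)
        for (const enum AVPixelFormat *s = supported; *s != AV_PIX_FMT_NONE; s++)
            if (*fmt == *s)
                return *fmt;
    return AV_PIX_FMT_NONE;  /* no overlap: signal failure to the caller */
}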
Exemplo n.º 27
0
bool Movie::Setup()
{
    if (!IsRecording())
        return false;
    if (!av)
        return false;

    bool success = true;
    std::string err_msg;
    
	alephone::Screen *scr = alephone::Screen::instance();
	view_rect = scr->window_rect();
	
	if (MainScreenIsOpenGL())
		view_rect.y = scr->height() - (view_rect.y + view_rect.h);
	
	view_rect.x *= scr->pixel_scale();
	view_rect.y *= scr->pixel_scale();
	view_rect.w *= scr->pixel_scale();
	view_rect.h *= scr->pixel_scale();

	temp_surface = SDL_CreateRGBSurface(SDL_SWSURFACE, view_rect.w, view_rect.h, 32,
										0x00ff0000, 0x0000ff00, 0x000000ff,
										0);
	success = (temp_surface != NULL);
	if (!success) err_msg = "Could not create SDL surface";

    Mixer *mx = Mixer::instance();
    
    av_register_all();
    avcodec_register_all();
    
    // Open output file
    AVOutputFormat *fmt;
    if (success)
    {
        fmt = av_guess_format("webm", NULL, NULL);
        success = fmt;
        if (!success) err_msg = "Could not find output format";
    }
    if (success)
    {
        av->fmt_ctx = avformat_alloc_context();
        success = av->fmt_ctx;
        if (!success) err_msg = "Could not allocate movie format context";
    }
    if (success)
    {
        av->fmt_ctx->oformat = fmt;
        strncpy(av->fmt_ctx->filename, moviefile.c_str(), 1024);
        success = (0 <= avio_open(&av->fmt_ctx->pb, av->fmt_ctx->filename, AVIO_FLAG_WRITE));
        if (!success) err_msg = "Could not open movie file for writing";
    }
    
    // Open output video stream
    AVCodec *video_codec;
    AVStream *video_stream;
    if (success)
    {
        video_codec = avcodec_find_encoder(AV_CODEC_ID_VP8);
        success = video_codec;
        if (!success) err_msg = "Could not find VP8 encoder";
    }
    if (success)
    {
        video_stream = avformat_new_stream(av->fmt_ctx, video_codec);
        success = video_stream;
        if (!success) err_msg = "Could not open output video stream";
    }
    if (success)
    {
        video_stream->codec->codec_id = video_codec->id;
        video_stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        video_stream->codec->width = view_rect.w;
        video_stream->codec->height = view_rect.h;
        video_stream->codec->time_base = (AVRational){1, TICKS_PER_SECOND};
        video_stream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
        video_stream->codec->flags |= CODEC_FLAG_CLOSED_GOP;
        video_stream->codec->thread_count = get_cpu_count();
        
        if (av->fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            video_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        
        av->video_stream_idx = video_stream->index;
        
        // tuning options
        int vq = graphics_preferences->movie_export_video_quality;
        video_stream->codec->bit_rate = ScaleQuality(vq, 100*1024, 1024*1024, 10*1024*1024);
        video_stream->codec->qmin = ScaleQuality(vq, 10, 4, 0);
        video_stream->codec->qmax = ScaleQuality(vq, 63, 63, 50);
        std::string crf = boost::lexical_cast<std::string>(ScaleQuality(vq, 63, 10, 4));
        av_opt_set(video_stream->codec->priv_data, "crf", crf.c_str(), 0);
        
        success = (0 <= avcodec_open2(video_stream->codec, video_codec, NULL));
        if (!success) err_msg = "Could not open video codec";
    }
    if (success)
    {
        av->video_bufsize = view_rect.w * view_rect.h * 4 + 10000;
        av->video_buf = static_cast<uint8_t *>(av_malloc(av->video_bufsize));
        success = av->video_buf;
        if (!success) err_msg = "Could not allocate video buffer";
    }
    if (success)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,0)
        av->video_frame = avcodec_alloc_frame();
#else
        av->video_frame = av_frame_alloc();
#endif
        success = av->video_frame;
        if (!success) err_msg = "Could not allocate video frame";
    }
    if (success)
    {
        int numbytes = avpicture_get_size(video_stream->codec->pix_fmt, view_rect.w, view_rect.h);
        av->video_data = static_cast<uint8_t *>(av_malloc(numbytes));
        success = av->video_data;
        if (!success) err_msg = "Could not allocate video data buffer";
    }
    if (success)
    {
        avpicture_fill(reinterpret_cast<AVPicture *>(av->video_frame), av->video_data, video_stream->codec->pix_fmt, view_rect.w, view_rect.h);
    }
    
    // Open output audio stream
    AVCodec *audio_codec;
    AVStream *audio_stream;
    if (success)
    {
        audio_codec = avcodec_find_encoder(AV_CODEC_ID_VORBIS);
        success = audio_codec;
        if (!success) err_msg = "Could not find Vorbis encoder";
    }
    if (success)
    {
        audio_stream = avformat_new_stream(av->fmt_ctx, audio_codec);
        success = audio_stream;
        if (!success) err_msg = "Could not open output audio stream";
    }
    if (success)
    {
        audio_stream->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
        audio_stream->codec->codec_id = audio_codec->id;
        audio_stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        audio_stream->codec->sample_rate = mx->obtained.freq;
        audio_stream->codec->time_base = (AVRational){1, mx->obtained.freq};
        audio_stream->codec->channels = 2;
        
        if (av->fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            audio_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        
        av->audio_stream_idx = audio_stream->index;
        
        // tuning options
        int aq = graphics_preferences->movie_export_audio_quality;
        audio_stream->codec->global_quality = FF_QP2LAMBDA * (aq / 10);
        audio_stream->codec->flags |= CODEC_FLAG_QSCALE;
        
        audio_stream->codec->sample_fmt = AV_SAMPLE_FMT_FLTP;
        success = (0 <= avcodec_open2(audio_stream->codec, audio_codec, NULL));
        if (!success) err_msg = "Could not open audio codec";
    }
    if (success)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,0)
        av->audio_frame = avcodec_alloc_frame();
#else
        av->audio_frame = av_frame_alloc();
#endif
        success = av->audio_frame;
        if (!success) err_msg = "Could not allocate audio frame";
    }
    if (success)
    {
        av->audio_fifo = av_fifo_alloc(262144);
        success = av->audio_fifo;
        if (!success) err_msg = "Could not allocate audio fifo";
    }
    if (success)
    {
        av->audio_data = reinterpret_cast<uint8_t *>(av_malloc(524288));
        success = av->audio_data;
        if (!success) err_msg = "Could not allocate audio data buffer";
    }
    if (success)
    {
        av->audio_data_conv = reinterpret_cast<uint8_t *>(av_malloc(524288));
        success = av->audio_data_conv;
        if (!success) err_msg = "Could not allocate audio conversion buffer";
    }
    
    // initialize conversion context
    if (success)
    {
        av->sws_ctx = sws_getContext(temp_surface->w, temp_surface->h, AV_PIX_FMT_RGB32,
                                     video_stream->codec->width,
                                     video_stream->codec->height,
                                     video_stream->codec->pix_fmt,
                                     SWS_BILINEAR,
                                     NULL, NULL, NULL);
        success = av->sws_ctx;
        if (!success) err_msg = "Could not create video conversion context";
    }
    
    // Start movie file
    if (success)
    {
        video_stream->time_base = (AVRational){1, TICKS_PER_SECOND};
        audio_stream->time_base = (AVRational){1, mx->obtained.freq};
        success = (0 <= avformat_write_header(av->fmt_ctx, NULL));
        if (!success) err_msg = "Could not write movie header";
    }
    
    // set up our threads and intermediate storage
    if (success)
    {
        videobuf.resize(av->video_bufsize);
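        // presumably 2 bytes/sample * 2 channels * (freq / 30): ~1/30 s of 16-bit stereo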
        audiobuf.resize(2 * 2 * mx->obtained.freq / 30);
    }
    if (success)
    {
        encodeReady = SDL_CreateSemaphore(0);
        fillReady = SDL_CreateSemaphore(1);
        stillEncoding = true;
        success = encodeReady && fillReady;
        if (!success) err_msg = "Could not create movie thread semaphores";
    }
    if (success)
    {
        encodeThread = SDL_CreateThread(Movie_EncodeThread, "MovieSetup_encodeThread", this);
        success = encodeThread;
        if (!success) err_msg = "Could not create movie encoding thread";
    }

    if (!success)
    {
        StopRecording();
        std::string full_msg = "Your movie could not be exported. (";
        full_msg += err_msg;
        full_msg += ".)";
        logError(full_msg.c_str());
        alert_user(full_msg.c_str());
    }
    av->inited = success;
    return success;
}
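For orientation, a minimal sketch (not in the original source) of the consumer loop those two semaphores imply; EncodeVideo/EncodeAudio are hypothetical stand-ins for whatever drains videobuf and audiobuf:

int Movie_EncodeThread(void *arg)
{
    Movie *m = static_cast<Movie *>(arg);
    while (true)
    {
        SDL_SemWait(m->encodeReady);   // block until the game thread has filled the buffers
        if (!m->stillEncoding)
            break;                     // StopRecording() clears the flag and posts encodeReady
        m->EncodeVideo(m->videobuf);   // hypothetical: encode/mux one tick of video
        m->EncodeAudio(m->audiobuf);   // hypothetical: encode/mux the matching audio slice
        SDL_SemPost(m->fillReady);     // hand the buffers back for refilling
    }
    return 0;
}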
bool VideoWriterUnit::OpenStreams(StreamSet* set) {
  // Setup FFMPEG.
  if (!ffmpeg_initialized_) {
    ffmpeg_initialized_ = true;
    av_register_all();
  }

  // Find video stream index.
  video_stream_idx_ = FindStreamIdx(options_.stream_name, set);

  if (video_stream_idx_ < 0) {
    LOG(ERROR) << "Could not find Video stream!\n";
    return false;
  }

  const VideoStream& vid_stream = set->at(video_stream_idx_)->As<VideoStream>();

  frame_width_ = vid_stream.frame_width();
  frame_height_ = vid_stream.frame_height();
  if (!options_.fps) {
    options_.fps = vid_stream.fps();
  }

  if (!options_.output_format.empty()) {
    output_format_ = av_guess_format(options_.output_format.c_str(), NULL, NULL);
  } else {
    output_format_ = av_guess_format(NULL, video_file_.c_str(), NULL);
  }

  output_width_ = frame_width_;
  output_height_ = frame_height_;

  if (options_.scale != 1) {
    if (options_.scale_max_dim || options_.scale_min_dim) {
      LOG(WARNING) << "Scale set, ignoring scale_[max|min]_dim.";
    }
    output_width_ *= options_.scale;
    output_height_ *= options_.scale;
  } else {
    if (options_.scale_max_dim) {
      float max_dim = std::max(frame_width_, frame_height_);
      output_width_ = (float)frame_width_ / max_dim * options_.scale_max_dim;
      output_height_ = (float)frame_height_ / max_dim * options_.scale_max_dim;
    } else if (options_.scale_min_dim) {
      float min_dim = std::min(frame_width_, frame_height_);
      output_width_ = (float)frame_width_ / min_dim * options_.scale_min_dim;
      output_height_ = (float)frame_height_ / min_dim * options_.scale_min_dim;
    }
  }

  int w_remainder = output_width_ % options_.fraction;
  if (w_remainder > 0) {
    if (w_remainder < options_.fraction / 2) {
      output_width_ -= w_remainder;
    } else {
      output_width_ += (options_.fraction - w_remainder);
    }
  }

  int h_remainder = output_height_ % options_.fraction;
  if (h_remainder > 0) {
    if (h_remainder < options_.fraction / 2) {
      output_height_ -= h_remainder;
    } else {
      output_height_ += (options_.fraction - h_remainder);
    }
  }
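  // e.g. with fraction == 16: width 1918 rounds up to 1920, width 1922
  // rounds down to 1920, keeping both dimensions multiples of `fraction`.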

  avformat_alloc_output_context2(&format_context_, output_format_, NULL,
                                 video_file_.c_str());
  if (!format_context_) {
    LOG(ERROR) << "Could not open format context.\n";
    return false;
  }

  // Add video stream.
  video_stream_ = avformat_new_stream(format_context_, NULL);
  if (!video_stream_) {
    LOG(ERROR) << "Could not allocate video stream.\n";
    return false;
  }

  // Set standard parameters.
  codec_context_ = video_stream_->codec;
  const std::string file_ending = video_file_.substr(video_file_.size() - 3);
  if (file_ending == "mp4" || file_ending == "mov") {
    codec_context_->codec_id = AV_CODEC_ID_H264;
  } else {
    codec_context_->codec_id = output_format_->video_codec;
  }

  codec_context_->codec_type = AVMEDIA_TYPE_VIDEO;
  codec_context_->bit_rate = options_.bit_rate;
  codec_context_->bit_rate_tolerance = options_.bit_rate / 5;
  codec_context_->width = output_width_;
  codec_context_->height = output_height_;

  LOG(INFO) << "Encoding with " << options_.fps << " fps.";
  codec_context_->time_base = av_d2q(1.0 / options_.fps, 1000);

  LOG(INFO) << "time base : " << codec_context_->time_base.num
            << " / " << codec_context_->time_base.den;

  codec_context_->pix_fmt = AV_PIX_FMT_YUV420P;

  if (codec_context_->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
    codec_context_->max_b_frames = 2;
  }

  if (codec_context_->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
    codec_context_->mb_decision = 2;
  }

  if (codec_context_->codec_id == AV_CODEC_ID_H264) {
    // H264 settings.
    codec_context_->coder_type = FF_CODER_TYPE_AC;
    codec_context_->flags |= CODEC_FLAG_LOOP_FILTER | CODEC_FLAG_GLOBAL_HEADER;
    codec_context_->profile = FF_PROFILE_H264_BASELINE;
    codec_context_->scenechange_threshold = 40;
    codec_context_->gop_size = 10;
    codec_context_->max_b_frames = 0;
    codec_context_->max_qdiff = 4;
    codec_context_->me_method = ME_HEX;
    codec_context_->me_range = 16;
    codec_context_->me_subpel_quality = 6;
    codec_context_->qmin = 10;
    codec_context_->qmax = 51;
    codec_context_->qcompress = 0.6;
    codec_context_->keyint_min = 10;
    codec_context_->trellis = 0;
    codec_context_->level = 30;
    codec_context_->refs = 1;
    av_opt_set(codec_context_->priv_data, "preset", "slow", 0);
    av_opt_set(codec_context_->priv_data, "vprofile", "baseline", 0);
  }

  // Find and open codec.
  codec_ = avcodec_find_encoder(codec_context_->codec_id);
  if (!codec_) {
    LOG(ERROR) << "Codec not found.";
    return false;
  }

  if (avcodec_open2(codec_context_, codec_, NULL) < 0) {
    LOG(ERROR) << "Could not open codec.";
    return false;
  }

  frame_encode_ = av_frame_alloc();
  frame_bgr_ = av_frame_alloc();

  if (!frame_bgr_ || !frame_encode_) {
    LOG(ERROR) << "Could not alloc tmp. images.\n";
    return false;
  }

  uint8_t* encode_buffer =
      (uint8_t*)av_malloc(avpicture_get_size(codec_context_->pix_fmt,
                                             codec_context_->width,
                                             codec_context_->height));

  avpicture_fill((AVPicture*)frame_encode_, encode_buffer, codec_context_->pix_fmt,
                 codec_context_->width, codec_context_->height);

  uint8_t* bgr_buffer = (uint8_t*)av_malloc(avpicture_get_size(AV_PIX_FMT_BGR24,
                                                               frame_width_,
                                                               frame_height_));
  avpicture_fill((AVPicture*)frame_bgr_,
                 bgr_buffer,
                 AV_PIX_FMT_BGR24,
                 frame_width_,
                 frame_height_);

  // Open output file, if needed.
  if (!(output_format_->flags & AVFMT_NOFILE)) {
    if (avio_open(&format_context_->pb, video_file_.c_str(), AVIO_FLAG_WRITE) < 0) {
      LOG(ERROR) << "Could not open " << video_file_;
      return false;
    }
  }

  if (avformat_write_header(format_context_, NULL) < 0) {
    LOG(ERROR) << "Could not write header.";
    return false;
  }

  // Setup color conversion.
  sws_context_ = sws_getContext(frame_width_,
                                frame_height_,
                                PIX_FMT_BGR24,
                                codec_context_->width,
                                codec_context_->height,
                                codec_context_->pix_fmt,
                                SWS_BICUBIC,
                                NULL,
                                NULL,
                                NULL);

  if (!sws_context_) {
    LOG(ERROR) << "Could initialize sws_context.";
    return false;
  }

  frame_num_ = 0;
  return true;
}
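A hedged sketch of the per-frame write path this setup implies; EncodeFrame is not part of the original excerpt and assumes an FFmpeg 2.x-era build where avcodec_encode_video2() and av_packet_rescale_ts() coexist with the stream->codec API used above:

bool VideoWriterUnit::EncodeFrame(const uint8_t* bgr_data) {
  // Re-point frame_bgr_ at the caller's tightly packed BGR24 scanlines
  // (rather than copying into bgr_buffer), then convert to the encoder's
  // resolution and pixel format.
  avpicture_fill((AVPicture*)frame_bgr_, const_cast<uint8_t*>(bgr_data),
                 AV_PIX_FMT_BGR24, frame_width_, frame_height_);
  sws_scale(sws_context_, frame_bgr_->data, frame_bgr_->linesize, 0,
            frame_height_, frame_encode_->data, frame_encode_->linesize);
  frame_encode_->pts = frame_num_++;

  AVPacket packet;
  av_init_packet(&packet);
  packet.data = NULL;   // let the encoder allocate the payload
  packet.size = 0;

  int got_packet = 0;
  if (avcodec_encode_video2(codec_context_, &packet, frame_encode_,
                            &got_packet) < 0) {
    return false;
  }
  if (!got_packet) {
    return true;        // encoder is still buffering; nothing to write yet
  }
  av_packet_rescale_ts(&packet, codec_context_->time_base,
                       video_stream_->time_base);
  packet.stream_index = video_stream_->index;
  // av_interleaved_write_frame() takes ownership of the packet payload.
  return av_interleaved_write_frame(format_context_, &packet) >= 0;
}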
Example #29
JNIEXPORT void JNICALL Java_com_parizene_streamer_Streamer_init(JNIEnv *env,
		jobject obj, jstring filename, jint width, jint height,
		jint frameRate) {
	LOGD("init()");

	av_register_all();

	AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!codec) {
		LOGE("codec not found");
		exit(1);
	}

	codecContext = avcodec_alloc_context3(codec);
	if (!codecContext) {
		LOGE("couldn't allocate codec context");
		exit(1);
	}

	/* put sample parameters */
	codecContext->bit_rate = 400000;
	/* resolution must be a multiple of two */
	codecContext->width = width;
	codecContext->height = height;
	/* frames per second */
	codecContext->time_base = (AVRational){1, frameRate};
	codecContext->gop_size = frameRate; /* emit one intra frame per second */
	codecContext->max_b_frames = 1;
	codecContext->pix_fmt = AV_PIX_FMT_YUV420P;

	av_opt_set(codecContext->priv_data, "profile", "baseline", 0);
	av_opt_set(codecContext->priv_data, "preset", "ultrafast", 0);

	if (avcodec_open2(codecContext, codec, NULL) < 0) {
		LOGE("couldn't open codec");
		exit(1);
	}

	inputFilename = env->GetStringUTFChars(filename, NULL);
	file = fopen(inputFilename, "wb");
	if (!file) {
		LOGE("couldn't open %s", inputFilename);
		exit(1);
	}

	frame = avcodec_alloc_frame();
	if (!frame) {
		LOGE("couldn't allocate frame");
		exit(1);
	}

	frame->format = codecContext->pix_fmt;
	frame->width = codecContext->width;
	frame->height = codecContext->height;

	if (av_image_alloc(frame->data, frame->linesize, codecContext->width,
			codecContext->height, codecContext->pix_fmt, 32) < 0) {
		LOGE("couldn't allocate raw picture buffer");
		exit(1);
	}

	tmpFrame = avcodec_alloc_frame();
	if (!tmpFrame) {
		LOGE("couldn't allocate frame");
		exit(1);
	}

	if (av_image_alloc(tmpFrame->data, tmpFrame->linesize, codecContext->width,
			codecContext->height, AV_PIX_FMT_NV21, 32) < 0) {
		LOGE("couldn't allocate raw picture buffer");
		exit(1);
	}

	count = 0;
}
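For context, a hedged sketch of the encode call this init() sets the stage for; the JNI method name, the lazily created SwsContext, and the plane copy are assumptions, since the matching function is not in the excerpt:

static struct SwsContext *swsContext = NULL;

JNIEXPORT void JNICALL Java_com_parizene_streamer_Streamer_encode(JNIEnv *env,
		jobject obj, jbyteArray data) {
	jbyte *bytes = env->GetByteArrayElements(data, NULL);

	/* NV21 from the camera: full-res Y plane, then interleaved VU at half
	 * vertical resolution; assumes width is a multiple of 32 so that
	 * linesize == width for the av_image_alloc'd planes. */
	int ysize = codecContext->width * codecContext->height;
	memcpy(tmpFrame->data[0], bytes, ysize);
	memcpy(tmpFrame->data[1], bytes + ysize, ysize / 2);
	env->ReleaseByteArrayElements(data, bytes, JNI_ABORT);

	if (!swsContext) {
		swsContext = sws_getContext(codecContext->width, codecContext->height,
				AV_PIX_FMT_NV21, codecContext->width, codecContext->height,
				AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
	}
	sws_scale(swsContext, tmpFrame->data, tmpFrame->linesize, 0,
			codecContext->height, frame->data, frame->linesize);
	frame->pts = count++;

	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.data = NULL; /* encoder allocates the payload */
	pkt.size = 0;

	int got_packet = 0;
	if (avcodec_encode_video2(codecContext, &pkt, frame, &got_packet) == 0
			&& got_packet) {
		fwrite(pkt.data, 1, pkt.size, file); /* raw Annex-B H.264 */
		av_free_packet(&pkt);
	}
}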
Example #30
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
                             AVFilterContext **sink)
{
    AVFilterGraph *filter_graph;
    AVFilterContext *abuffer_ctx;
    AVFilter        *abuffer;
    AVFilterContext *volume_ctx;
    AVFilter        *volume;
    AVFilterContext *aformat_ctx;
    AVFilter        *aformat;
    AVFilterContext *abuffersink_ctx;
    AVFilter        *abuffersink;

    AVDictionary *options_dict = NULL;
    char options_str[1024];
    char ch_layout[64];

    int err;

    /* Create a new filtergraph, which will contain all the filters. */
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        fprintf(stderr, "Unable to create filter graph.\n");
        return AVERROR(ENOMEM);
    }

    /* Create the abuffer filter;
     * it will be used for feeding the data into the graph. */
    abuffer = avfilter_get_by_name("abuffer");
    if (!abuffer) {
        fprintf(stderr, "Could not find the abuffer filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
    if (!abuffer_ctx) {
        fprintf(stderr, "Could not allocate the abuffer instance.\n");
        return AVERROR(ENOMEM);
    }

    /* Set the filter options through the AVOptions API. */
    av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
    av_opt_set    (abuffer_ctx, "channel_layout", ch_layout,                            AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (abuffer_ctx, "sample_fmt",     av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
    av_opt_set_q  (abuffer_ctx, "time_base",      (AVRational){ 1, INPUT_SAMPLERATE },  AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_ctx, "sample_rate",    INPUT_SAMPLERATE,                     AV_OPT_SEARCH_CHILDREN);

    /* Now initialize the filter; we pass NULL options, since we have already
     * set all the options above. */
    err = avfilter_init_str(abuffer_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffer filter.\n");
        return err;
    }

    /* Create volume filter. */
    volume = avfilter_get_by_name("volume");
    if (!volume) {
        fprintf(stderr, "Could not find the volume filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
    if (!volume_ctx) {
        fprintf(stderr, "Could not allocate the volume instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A different way of passing the options is as key/value pairs in a
     * dictionary. */
    av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
    err = avfilter_init_dict(volume_ctx, &options_dict);
    av_dict_free(&options_dict);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the volume filter.\n");
        return err;
    }

    /* Create the aformat filter;
     * it ensures that the output is of the format we want. */
    aformat = avfilter_get_by_name("aformat");
    if (!aformat) {
        fprintf(stderr, "Could not find the aformat filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
    if (!aformat_ctx) {
        fprintf(stderr, "Could not allocate the aformat instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A third way of passing the options is in a string of the form
     * key1=value1:key2=value2.... */
    snprintf(options_str, sizeof(options_str),
             "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
             av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
             (uint64_t)AV_CH_LAYOUT_STEREO);
    err = avfilter_init_str(aformat_ctx, options_str);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
        return err;
    }

    /* Finally create the abuffersink filter;
     * it will be used to get the filtered data out of the graph. */
    abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffersink) {
        fprintf(stderr, "Could not find the abuffersink filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
    if (!abuffersink_ctx) {
        fprintf(stderr, "Could not allocate the abuffersink instance.\n");
        return AVERROR(ENOMEM);
    }

    /* This filter takes no options. */
    err = avfilter_init_str(abuffersink_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffersink instance.\n");
        return err;
    }

    /* Connect the filters;
     * in this simple case the filters just form a linear chain. */
    err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
    if (err >= 0)
        err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
    if (err >= 0)
        err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
    if (err < 0) {
        fprintf(stderr, "Error connecting filters\n");
        return err;
    }

    /* Configure the graph. */
    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
        return err;
    }

    *graph = filter_graph;
    *src   = abuffer_ctx;
    *sink  = abuffersink_ctx;

    return 0;
}
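A minimal usage sketch for the graph built above, patterned after FFmpeg's filter_audio.c example: input frames go into src via av_buffersrc_add_frame() and filtered frames come out of sink via av_buffersink_get_frame(); process_output() is a hypothetical consumer.

static int filter_frame(AVFilterContext *src, AVFilterContext *sink, AVFrame *in)
{
    AVFrame *out = av_frame_alloc();
    int err;

    if (!out)
        return AVERROR(ENOMEM);

    /* Hand one input frame to the abuffer source (pass NULL to flush). */
    err = av_buffersrc_add_frame(src, in);
    if (err < 0)
        goto end;

    /* Drain everything the graph has ready; AVERROR(EAGAIN) means it wants
     * more input, AVERROR_EOF means the flushed graph is done. */
    while ((err = av_buffersink_get_frame(sink, out)) >= 0) {
        process_output(out);   /* hypothetical consumer */
        av_frame_unref(out);
    }
    if (err == AVERROR(EAGAIN) || err == AVERROR_EOF)
        err = 0;

end:
    av_frame_free(&out);
    return err;
}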