Code Example #1
File: h264encoder.cpp Project: CeBkCn/android-eye
void H264Encoder::init_(const int wid, const int hei) {
    // 0. building encoder parameters.
    x264_param_default_preset(&x264_opt_, "ultrafast", "zerolatency");

    x264_opt_.i_width = wid;
    x264_opt_.i_height = hei;
    x264_opt_.i_threads = 1;
    x264_opt_.b_repeat_headers = 1;
    x264_opt_.b_intra_refresh = 1;

    x264_opt_.rc.i_rc_method = X264_RC_CQP;
    x264_opt_.rc.i_qp_constant = 24;
    x264_opt_.rc.i_qp_min = 24;
    x264_opt_.rc.i_qp_max = 24;
    //x264_param_default(&opt);
    x264_param_apply_profile(&x264_opt_, "baseline");

    // 1. Allocate the input and output pictures
    x264_picture_alloc(&x264_picin_,  X264_CSP_NV12, x264_opt_.i_width, x264_opt_.i_height);
    x264_picture_alloc(&x264_picout_, X264_CSP_NV12, x264_opt_.i_width, x264_opt_.i_height);

    // 2. Building the encoder handler
    x264_hdl_ = x264_encoder_open(&x264_opt_);
    x264_encoder_parameters(x264_hdl_, &x264_opt_);
}
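
For context, a minimal sketch of how a frame might be pushed through the handle created above. The encode_ member shown here is hypothetical; x264_hdl_, x264_picin_ and x264_picout_ are the members allocated in init_:

// Hypothetical companion to init_(): encode one NV12 frame that has already
// been copied into x264_picin_, and expose the resulting NAL payload.
int H264Encoder::encode_(int64_t pts, uint8_t** out, int* out_size) {
    x264_nal_t* nals = NULL;
    int n_nals = 0;
    x264_picin_.i_pts = pts;
    int frame_size = x264_encoder_encode(x264_hdl_, &nals, &n_nals,
                                         &x264_picin_, &x264_picout_);
    if (frame_size <= 0)
        return frame_size;        // 0: nothing produced yet, <0: error
    // x264 lays the NAL payloads out back to back, so the first payload
    // spans the whole encoded frame (the same trick Code Example #6 relies on).
    *out = nals[0].p_payload;
    *out_size = frame_size;
    return frame_size;
}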
Code Example #2
File: h264encoder.cpp Project: MoshDev/AndroidRTC
int H264Encoder::Prepare(const MediaDescription& desc) {
    if ( desc.isVideo == false)
        return -1;
    
    int wid = desc.width;
    int hei = desc.height;

    // 0. building a default encoder parameters. 
    x264_param_default_preset(&x264_opt_, "ultrafast", "zerolatency");
    x264_opt_.rc.i_rc_method = X264_RC_CRF;
    x264_opt_.rc.i_bitrate = 512;
    x264_opt_.i_nal_hrd = X264_NAL_HRD_CBR; 
    //x264_param_default(&opt);
    
    // 1. Setting the fields of parameter struct
    x264_opt_.i_width = wid;
    x264_opt_.i_height = hei;
    //opt.i_slice_count = 5;
    //opt.b_intra_refresh = 1;
    
    // 3. Allocate the input and output pictures
    x264_picture_alloc(&x264_picin_[0], X264_CSP_NV12, x264_opt_.i_width, x264_opt_.i_height);
    x264_picture_alloc(&x264_picin_[1], X264_CSP_NV12, x264_opt_.i_width, x264_opt_.i_height);
    x264_picture_alloc(&x264_picout_, X264_CSP_NV12, x264_opt_.i_width, x264_opt_.i_height);
    ppIndex = -1;

    // 4. Building the encoder handler
    x264_hdl_ = x264_encoder_open(&x264_opt_);
    x264_encoder_parameters(x264_hdl_, &x264_opt_);

    return 0;
}
Code Example #3
File: myx264.cpp Project: my12doom/personalProjects
HRESULT x264::init(int width, int height, int bitrate)
{
	this->width = width;
	this->height = height;

	x264_param_t param;
	x264_param_default_preset(&param, "medium", "zerolatency");
// 	x264_param_apply_profile(&param, "baseline");
	param.i_frame_reference = 1;
	param.i_width = width;
	param.i_height = height;
	param.i_fps_num = 24000;
	param.i_fps_den = 1001;
	param.i_csp = X264_CSP_I420;

	param.i_keyint_max = 25;
	//param.b_intra_refresh = 1;
	param.b_cabac = 1;
	param.b_annexb = 1;
	param.rc.i_rc_method = X264_RC_ABR;
	param.rc.i_bitrate = bitrate;

	encoder = x264_encoder_open(&param);

	// init picture
	x264_picture_alloc(&pic_in, X264_CSP_I420, width, height);

	last_encode_time = timeGetTime();

	return S_OK;
}
Code Example #4
int H264EncWrapper::Initialize(int iWidth, int iHeight, int iRateBit, int iFps)
{
    m_param.i_width = iWidth;
    m_param.i_height = iHeight;
    
    m_param.i_fps_num = iFps;
    m_param.i_fps_den = 1;
    
    m_param.rc.i_bitrate = iRateBit;
    m_param.rc.i_rc_method = X264_RC_ABR;

    m_param.i_frame_reference = 4; /* maximum number of reference frames */
    //m_param.i_keyint_max = 8;
    //m_param.i_keyint_min = 4;

    /* Initialize the main encoder structure x264_t *h from the parameters above */
    if( ( m_h = x264_encoder_open( &m_param ) ) == NULL )
    {
        fprintf( stderr, "x264 [error]: x264_encoder_open failed\n" );
        return -1;
    }

    x264_picture_alloc( &m_pic, X264_CSP_I420, m_param.i_width, m_param.i_height );
    m_pic.i_type = X264_TYPE_AUTO;
    m_pic.i_qpplus1 = 0;
    
    return 0;
}
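
The snippet stops at setup; a matching teardown might look roughly like the sketch below (the Uninitialize name is an assumption, m_h and m_pic are the members used above):

// Hypothetical teardown for the Initialize() above.
int H264EncWrapper::Uninitialize()
{
    x264_picture_clean( &m_pic );      /* release the planes from x264_picture_alloc */
    if( m_h )
    {
        x264_encoder_close( m_h );     /* close the handle opened by x264_encoder_open */
        m_h = NULL;
    }
    return 0;
}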
Code Example #5
/* Begin encode
 * file located at:
 *
 *   src/com/livecamera/encoder/h264encoder.java
 */
jlong
Java_com_livecamera_encoder_h264encoder_CompressBegin( JNIEnv* env, jobject thiz,
		jint width, jint height)
{
	Encoder *en = (Encoder*) malloc(sizeof(Encoder));
	en->param = (x264_param_t*) malloc(sizeof(x264_param_t));
	en->picture = (x264_picture_t*) malloc(sizeof(x264_picture_t));
	x264_param_default(en->param);                          //set default param
	en->param->i_log_level = X264_LOG_NONE;
	en->param->i_width = width;
	en->param->i_height = height;
	en->param->rc.i_lookahead = 0;
	en->param->i_bframe = 0;
	en->param->i_fps_num = 5;
	en->param->i_fps_den = 1;
	if ((en->handle = x264_encoder_open(en->param)) == 0) {
		return 0;
	}

	//create a new pic
	x264_picture_alloc(en->picture, X264_CSP_I420,
			en->param->i_width, en->param->i_height);

	return (jlong)en;
}
Code Example #6
File: x264lib.c Project: svn2github/Xpra
int compress_image(struct x264lib_ctx *ctx, const uint8_t *in, int stride, uint8_t **out, int *outsz)
{
	if (!ctx->encoder || !ctx->rgb2yuv)
		return 1;

	x264_picture_t pic_in, pic_out;
	x264_picture_alloc(&pic_in, X264_CSP_I420, ctx->width, ctx->height);

	/* Colorspace conversion (RGB -> I420) */
	sws_scale(ctx->rgb2yuv, &in, &stride, 0, ctx->height, pic_in.img.plane, pic_in.img.i_stride);

	/* Encoding */
	pic_in.i_pts = 1;

	x264_nal_t* nals;
	int i_nals;
	int frame_size = x264_encoder_encode(ctx->encoder, &nals, &i_nals, &pic_in, &pic_out);
	if (frame_size >= 0) {
		/* Do not free that! */
		*out = nals[0].p_payload;
		*outsz = frame_size;
	} else {
		fprintf(stderr, "Problem\n");
		x264_picture_clean(&pic_in);
		return 2;
	}
  
	x264_picture_clean(&pic_in);
	return 0;
}
Code Example #7
File: main.c Project: unixpickle/Processing-H264
int encode_context_frame(EncodeContext context, const char * frameData, char ** nalData, int * dataSize) {
    x264_picture_t pictureOut, pictureIn;
    x264_picture_alloc(&pictureIn, X264_CSP_I420, context.width, context.height);
    int rgbStride = context.width * 3;
    sws_scale(context.converter, (const uint8_t **)&frameData, &rgbStride, 0, context.height,
              pictureIn.img.plane, pictureIn.img.i_stride);
    
    x264_nal_t * nals;
    int i_nals;
    if (DEBUG_ENABLED) printf("encode_context_frame: passing data to x264\n");
    int frameSize = x264_encoder_encode(context.encoder, &nals, &i_nals, &pictureIn, &pictureOut); // TODO: figure out if picture_out can be NULL
    x264_picture_clean(&pictureIn);
    if (frameSize <= 0) return -1;
    
    if (DEBUG_ENABLED) printf("encode_context_frame: joining the frames\n");
    
    int totalSize = 0;
    for (int i = 0; i < i_nals; i++) {
        totalSize += nals[i].i_payload;
    }
    
    char * returnData = (char *)malloc(totalSize);
    int offset = 0;
    for (int i = 0; i < i_nals; i++) {
        memcpy(&returnData[offset], nals[i].p_payload, nals[i].i_payload);
        offset += nals[i].i_payload;
    }
    
    *nalData = returnData;
    *dataSize = totalSize;
    
    return 0;
}
Code Example #8
File: capenc.c Project: ricann/videoctd
void encode_alloc()
{
	x264_encode.para = (x264_param_t *)Malloc(sizeof(x264_param_t));
	memset(x264_encode.para, 0, sizeof(x264_param_t));

	x264_encode.pic = (x264_picture_t *)Malloc(sizeof(x264_picture_t ));
	memset(x264_encode.pic, 0, sizeof(x264_picture_t ));

	x264_encode.nal = (x264_nal_t *)Malloc(sizeof(x264_nal_t ));
	memset(x264_encode.nal, 0, sizeof(x264_nal_t ));

	// alloc data for a picture. You must call x264_picture_clean on it
	// ricann todo
	if(capg.pixelfmt == V4L2_PIX_FMT_YUYV)
		x264_encode.colorspace = X264_CSP_I422;
	else if(capg.pixelfmt == V4L2_PIX_FMT_YUV420)
		x264_encode.colorspace = X264_CSP_I420;
	else
		CAP_DBG_EXIT("pixelfmt invalid\n");

	if(x264_picture_alloc(x264_encode.pic, x264_encode.colorspace,
			capg.width, capg.height) < 0)
		CAP_DBG_EXIT("x264_picture_alloc error\n");

}
Code Example #9
File: video_encoder.c Project: repco/arcade_server
VideoEncoder* video_encoder_init(int width, int height, int fpsNum, int fpsDen, int maxWidth){
    printf("[arcade encoder init] %d x %d @ (%d / %d)\n",width,height,fpsNum,fpsDen);


    VideoEncoder *enc = (VideoEncoder*) malloc(sizeof(VideoEncoder));
    memset(enc,0,sizeof(VideoEncoder));
    //TODO: this needs to become a parameter
    enc->in_fmt =  AV_PIX_FMT_BGRA;

    enc->width = width;
    enc->height = height;
    enc->max_width = maxWidth;

    printf("[arcade encoder] capping width at %dpx\n",enc->max_width);
    float aspectRatio = (float) enc->width / (float) enc->height;
    enc->out_width = enc->width > enc->max_width ? enc->max_width : enc->width;
    enc->out_height = (int) ((float) enc->out_width / aspectRatio);

    printf("[arcade encoder init]\n\t%d x %d --> %d x %d\n\t@ (%d / %d)\n",
	   enc->width,enc->height,
	   enc->out_width,enc->out_height,
	   fpsNum,fpsDen);


    x264_param_t param;
    x264_param_default_preset(&param, "ultrafast", "zerolatency");
    param.i_threads = 1;
    param.i_width = enc->out_width;
    param.i_height = enc->out_height;
    param.i_fps_num = fpsNum;
    param.i_fps_den = fpsDen;
    // Intra refresh:
    param.i_keyint_max = 30000;
    param.b_intra_refresh = 1;
    //Rate control:
    param.rc.i_rc_method = X264_RC_CRF;
    param.rc.f_rf_constant = 25;
    param.rc.f_rf_constant_max = 35;
    //For streaming:
    param.b_repeat_headers = 1;
    param.b_annexb = 1;
    x264_param_apply_profile(&param, "baseline");

    enc->encoder = x264_encoder_open(&param);
    x264_picture_alloc(&enc->pic_in, X264_CSP_I420, enc->out_width, enc->out_height);

    enc->output_buffer_size = (int)sizeof(uint8_t)*4096*1024;
    enc->output_buffer = malloc(enc->output_buffer_size);

    enc->sws = sws_getContext(enc->width, enc->height, enc->in_fmt,
			      enc->out_width, enc->out_height, AV_PIX_FMT_YUV420P,
			      SWS_FAST_BILINEAR, NULL, NULL, NULL);
    enc->num_frames = 0;

    return enc;

}
Code Example #10
File: H264Encoder.cpp Project: Kristishka/libvision
void H264Encoder::Init(x264_param_t * param)
{
    m_encoder = x264_encoder_open(param);
    m_width = param->i_width;
    m_height = param->i_height;
    m_stride = m_width*4;
    x264_picture_alloc(&m_pic_in, X264_CSP_I420, m_width, m_height);
    m_convert_ctx = sws_getContext(m_width, m_height, PIX_FMT_RGBA, m_width, m_height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
}
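
Init only prepares the converter and the encoder; a rough sketch of the per-frame path it implies is shown below. The Encode method, its RGBA input pointer and the NAL handling are assumptions; m_convert_ctx, m_stride, m_pic_in and m_encoder are the members set up above:

// Hypothetical per-frame path for Init(): convert one RGBA frame with the
// prepared sws context, then encode it.
int H264Encoder::Encode(const uint8_t* rgba, x264_nal_t** nals, int* num_nals)
{
    const uint8_t* src[1] = { rgba };
    int src_stride[1] = { m_stride };          // m_stride = m_width * 4 bytes per row
    sws_scale(m_convert_ctx, src, src_stride, 0, m_height,
              m_pic_in.img.plane, m_pic_in.img.i_stride);

    x264_picture_t pic_out;
    return x264_encoder_encode(m_encoder, nals, num_nals, &m_pic_in, &pic_out);
}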
Code Example #11
File: x264Encoder.cpp Project: kwende/FFmpegTest1
void x264Encoder::Initilize()
{
    x264_param_default_preset(&parameters, "veryfast", "zerolatency");
    parameters.i_log_level = X264_LOG_INFO;
    parameters.i_threads = 1;
    parameters.i_width = 512 / 2;
    parameters.i_height = 424 / 2;
    parameters.i_fps_num = _fps;
    parameters.i_fps_den = 1;
    parameters.i_keyint_max = 15;
    parameters.b_intra_refresh = 1;
    parameters.rc.i_rc_method = X264_RC_CRF;
    parameters.rc.i_vbv_buffer_size = 1000000;
    parameters.rc.i_vbv_max_bitrate = 90000;
    parameters.rc.f_rf_constant = 25;
    parameters.rc.f_rf_constant_max = 35;
    parameters.i_sps_id = 7;
    // keep the following two values set to 1
    parameters.b_repeat_headers = 1;    // to get header before every I-Frame
    parameters.b_annexb = 1; // put start code in front of nal. we will remove start code later
    x264_param_apply_profile(&parameters, "baseline");

    encoder = x264_encoder_open(&parameters);

    x264_picture_alloc(&picture_in, X264_CSP_I420, 512 / 2, 424 / 2);
    picture_in.i_type = X264_TYPE_AUTO;
    picture_in.img.i_csp = X264_CSP_I420;

    x264_picture_alloc(&picture_out, X264_CSP_I420, parameters.i_width, parameters.i_height);
    picture_out.i_type = X264_TYPE_AUTO;
    picture_out.img.i_csp = X264_CSP_I420;

    // I have initialized my color space converter for BGR24 to YUV420 because my OpenCV
    // video capture gives BGR24 images. You can initialize it according to your input pixel format.
    convertContext = sws_getContext(
        512 / 2,
        424 / 2,
        PIX_FMT_BGR24, 
        parameters.i_width, 
        parameters.i_height, 
        PIX_FMT_YUV420P, 
        SWS_FAST_BILINEAR, 
        NULL, NULL, NULL);
}
Code Example #12
File: lavf.c Project: tufei/x264c64
static int picture_alloc( x264_picture_t *pic, int i_csp, int i_width, int i_height )
{
    if( x264_picture_alloc( pic, i_csp, i_width, i_height ) )
        return -1;
    lavf_pic_t *pic_h = pic->opaque = malloc( sizeof(lavf_pic_t) );
    if( !pic_h )
        return -1;
    avcodec_get_frame_defaults( &pic_h->frame );
    av_init_packet( &pic_h->packet );
    return 0;
}
Code Example #13
File: x264lib.c Project: svn2github/Xpra
x264_picture_t *csc_image_rgb2yuv(struct x264lib_ctx *ctx, const uint8_t *in, int stride)
{
	if (!ctx->encoder || !ctx->rgb2yuv)
		return NULL;

	x264_picture_t *pic_in = malloc(sizeof(x264_picture_t));
	x264_picture_alloc(pic_in, ctx->colour_sampling, ctx->width, ctx->height);

	/* Colorspace conversion (RGB -> I4??) */
	sws_scale(ctx->rgb2yuv, &in, &stride, 0, ctx->height, pic_in->img.plane, pic_in->img.i_stride);
	return pic_in;
}
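
Since the function above both mallocs the x264_picture_t and lets x264_picture_alloc allocate its planes, the caller has to undo both once the picture is no longer needed; a small sketch (the function name is an assumption):

/* Sketch: release a picture returned by csc_image_rgb2yuv(). */
void free_csc_image(x264_picture_t *pic_in)
{
	if (!pic_in)
		return;
	x264_picture_clean(pic_in);	/* frees the planes from x264_picture_alloc */
	free(pic_in);			/* frees the struct allocated with malloc */
}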
Code Example #14
File: msx264.cpp Project: saoziyang/saozi.yang-study
void msx264::msx264_pic_init()
{
    pPicIn = new x264_picture_t;
    pPicOut = new x264_picture_t;

	x264_picture_init(pPicOut);

    x264_picture_alloc(pPicIn, X264_CSP_I420, params.i_width,
			   params.i_height);

	pPicIn->img.i_csp = X264_CSP_I420;
	pPicIn->img.i_plane = 3;
}
Code Example #15
void rtspStream::initH264Encoder(int width,int height,int fps,int bitRate)
{
	frame_num = 0; 
	pX264Handle   = NULL;
	pX264Param = new x264_param_t;
	assert(pX264Param);
	m_nFPS = 25;
	//* Configure the parameters
	//* Use the default preset. Because this is real-time network streaming, the zerolatency tune is used here, so there are no delayed_frames; if you do not use zerolatency, you also have to drain the cached (delayed) encoded frames once encoding is done.
	x264_param_default_preset(pX264Param, "veryfast", "zerolatency");
	//* cpuFlags
	pX264Param->i_threads  = X264_SYNC_LOOKAHEAD_AUTO;//* guarantees the lookahead buffers can be drained and reused without deadlocking.
	//* Video options
	pX264Param->i_width   = width; //* width of the image to encode.
	pX264Param->i_height  = height; //* height of the image to encode
	pX264Param->i_frame_total = 0; //* total number of frames to encode; use 0 if unknown.
	pX264Param->i_keyint_max = 10; 
	//* Stream parameters
	pX264Param->i_bframe  = 5;
	pX264Param->b_open_gop  = 0;
	pX264Param->i_bframe_pyramid = 0;
	pX264Param->i_bframe_adaptive = X264_B_ADAPT_TRELLIS;
	//* Log parameter; just comment this out when encoder log output is not needed
	// pX264Param->i_log_level  = X264_LOG_DEBUG;
	//* Rate-control parameters
	pX264Param->rc.i_bitrate = bitRate;//* bitrate in kbps
	//* muxing parameters
	pX264Param->i_fps_den  = 1; //* fps denominator
	pX264Param->i_fps_num  = fps;//* fps numerator
	pX264Param->i_timebase_den = pX264Param->i_fps_num;
	pX264Param->i_timebase_num = pX264Param->i_fps_den;
	//* Set the profile. Use the baseline profile.
	x264_param_apply_profile(pX264Param, x264_profile_names[0]);

	pNals = NULL;
	pPicIn = new x264_picture_t;
	pPicOut = new x264_picture_t;
	x264_picture_init(pPicOut);
	x264_picture_alloc(pPicIn, X264_CSP_I420, pX264Param->i_width, pX264Param->i_height);
	pPicIn->img.i_csp = X264_CSP_I420;
	pPicIn->img.i_plane = 3;
	//* Open the encoder handle. x264_encoder_parameters retrieves the parameters
	//* actually used by x264; x264_encoder_reconfig updates them at run time.
	pX264Handle = x264_encoder_open(pX264Param);
	assert(pX264Handle);

	pPicIn->img.plane[0] = PYUVBuf;
	pPicIn->img.plane[1] = PYUVBuf + width *height;
	pPicIn->img.plane[2] = PYUVBuf + width * height * 5 / 4;
	pPicIn->img.plane[3] = 0;
}
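
The comment above notes that without the zerolatency tune there are delayed frames to drain, and this setup re-enables B-frames (i_bframe = 5), so a flush along the lines of the loop in Code Example #28 would be needed before closing the encoder. A minimal sketch follows; the flushH264Encoder name and the output handling are assumptions, while pX264Handle and pNals are the members declared above:

// Hypothetical flush: drain the frames still buffered inside the encoder.
void rtspStream::flushH264Encoder()
{
	x264_picture_t picOut;
	int iNal = 0;
	while (x264_encoder_delayed_frames(pX264Handle) > 0)
	{
		int frameSize = x264_encoder_encode(pX264Handle, &pNals, &iNal, NULL, &picOut);
		if (frameSize <= 0)
			break;
		//* ... hand pNals[0 .. iNal-1] to the output here ...
	}
	x264_encoder_close(pX264Handle);
	pX264Handle = NULL;
}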
Code Example #16
File: encoder.cpp Project: SummerLv/paperChapter4
void init_picture(size_t yuv_size)
{
	x264_picture_alloc(&pic_in, X264_CSP_I420, x264_width, x264_height);
	yuv_buffer=(unsigned char *)malloc(yuv_size);


	pic_in.img.plane[0]=yuv_buffer;// Y--component
	pic_in.img.plane[1]=pic_in.img.plane[0]+x264_width*x264_height;//U--component
	pic_in.img.plane[2]=pic_in.img.plane[1]+x264_width*x264_height/4;//V--component

	printf("in init_picture function in  [encoder.cpp] \n");

	//return yuv_buffer;
}
Code Example #17
File: yuyv2yuv420p.c Project: heartpursue/video_njue
//initialize the ctx structure
void init_ctx(struct camera *cam)
{
    ctx.width = cam->width;
    ctx.height = cam->height;
    ctx.sws = sws_getContext(ctx.width, ctx.height, PIX_FMT_YUYV422, ctx.width, ctx.height, PIX_FMT_YUV420P,
                            SWS_FAST_BILINEAR, NULL, NULL, NULL);
    ctx.rows = ctx.height;
    ctx.bytesperrow = cam->bytesperrow;

    //test
//    avpicture_alloc(&ctx.pic_src, PIX_FMT_YUYV422, ctx.width, ctx.height);
    x264_picture_alloc(&ctx.pic_xsrc, X264_CSP_I422, ctx.width, ctx.height);

    printf("ctx width is %d\n", ctx.width);
    printf("ctx height is %d\n", ctx.height);
    printf("ctx bytesperline is %d\n", ctx.bytesperrow);
    printf("ctx rows is %d\n", ctx.rows);
}
Code Example #18
File: h264Encoder.c Project: iam6000/SkyTXIOT
bool init_x264_encoder(Encoder * enc,int width,int height)
{
	enc->param = (x264_param_t*)malloc(sizeof(x264_param_t));
	enc->picture = (x264_picture_t*)malloc(sizeof(x264_picture_t));	
	enc->picture->i_pts = 0 ; 
	// set default  param  	
	// todo improvements later  
	x264_param_default(enc->param); 
	// set width and height 
	enc->param->i_width = width;
	enc->param->i_height = height ;

	enc->param->rc.i_lookahead = 0; 

	// set fps  
	enc->param->i_fps_num = 10 ; 
	enc->param->i_fps_den = 1 ;

	// set baseline  
	x264_param_apply_profile(enc->param, x264_profile_names[0]);

	 // open encoder 
	if( (enc->handle = x264_encoder_open(enc->param)) == 0 )
	{
	 	SKY_LOG(1,(TAG_H264ENCODER,"Could not Open x264_encoder"));
	 	// will free when encoder close  or now 
	 	//free(enc->param); 
	 	//free(enc->picture);
	 	return false ;
	}
	 
	 // create a new picture   malloc enc->picture here  X264_CSP_I422   X264_CSP_YV16  X264_CSP_NV16
	//x264_picture_alloc(enc->picture,X264_CSP_YV12,enc->param->i_width,enc->param->i_height);
	//enc->picture->img.i_csp = X264_CSP_YV12 ; 	
	//x264_picture_alloc(enc->picture,X264_CSP_NV12,enc->param->i_width,enc->param->i_height);	
	//enc->picture->img.i_csp = X264_CSP_NV12 ; 
	x264_picture_alloc(enc->picture,X264_CSP_YV12,enc->param->i_width,enc->param->i_height);	
	enc->picture->img.i_csp = X264_CSP_YV12 ; 
	enc->picture->img.i_plane = 3 ;   
	
	return true ; 
	 
}
Code Example #19
File: x264_encoder.c Project: c1111f1/msc-p1
void encode_init(Encoder *encoder, int img_width, int img_height)
{
//Set default x264 parameters
	encoder->param = (x264_param_t *) malloc(sizeof(x264_param_t));
	encoder->picture = (x264_picture_t *) malloc(sizeof(x264_picture_t));
	x264_param_default(encoder->param);

encoder->param->i_width = img_width; //set frame width
encoder->param->i_height = img_height; //set frame height
encoder->param->rc.i_lookahead = 0; // frame lookahead buffer; 0 disables lookahead
encoder->param->i_fps_num = 25; // fps numerator
encoder->param->i_fps_den = 1; // fps denominator
encoder->param->rc.i_lookahead = 0;
encoder->param->i_sync_lookahead = 0;
encoder->param->i_bframe = 0;
encoder->param->b_sliced_threads = 1;
encoder->param->b_vfr_input = 0;
encoder->param->rc.b_mb_tree = 0;

x264_param_apply_profile(encoder->param, x264_profile_names[0]);

encoder->handle = x264_encoder_open(encoder->param);

if (encoder->handle == 0) 
{
	return;
}
/* Create a new pic */

//encoder->picture->param->i_width = img_width;
//encoder->picture->param->i_height = img_height;

x264_picture_alloc(encoder->picture, X264_CSP_I420, 
	encoder->param->i_width,encoder->param->i_height);

encoder->picture->img.i_csp = X264_CSP_I420;
encoder->picture->img.i_plane = 3;

g_H264_Buf = (uint8_t *) malloc(
sizeof(uint8_t) * g_ImgWidth * g_ImgHeight * 3); // allocate the output buffer

}
Code Example #20
_declspec(dllexport) int __cdecl AllocEncoder(struct TranscoderContext* ctx) {
	struct TranscoderOptions* options = ctx->options;

	if (x264_picture_alloc(&pic_in, X264_CSP_I420, options->OutputWidth, options->OutputHeight) < 0) {
		return 1;
	}

	fprintf(stdout, "Width: %d\n", options->InputWidth);
	fprintf(stdout, "Height: %d\n", options->InputHeight);

	NALBYTES = malloc(options->OutputWidth * options->OutputHeight * 4);

	x264_param_t param;
	x264_param_default_preset(&param, "ultrafast", "zerolatency");
	param.i_threads = 0;
	param.i_width = options->OutputWidth;
	param.i_height = options->OutputHeight;
	param.rc.i_rc_method = X264_RC_CRF;

	//param.rc.i_bitrate = 3000;
	//param.rc.i_vbv_max_bitrate = 4000;
	param.rc.f_rf_constant = 20;
	fprintf(stdout, "rf_constant: %f\n", param.rc.f_rf_constant);
	fprintf(stdout, "rf_constant_max: %f\n", param.rc.f_rf_constant_max);
	//fprintf(stdout, "rf_constant_max: %f\n", param.rc.rf);

	//param.i_keyint_max = 25;
	param.i_keyint_max = 500;

	//param.b_intra_refresh = 1;
	param.b_repeat_headers = 1;
	param.b_annexb = 1;
	//param.i_log_level = -1;

	x264_t* encoder = x264_encoder_open(&param);
	ctx->encoder = encoder;

	convertCtx = sws_getContext(options->InputWidth, options->InputHeight, AV_PIX_FMT_RGBA, options->OutputWidth, options->OutputHeight, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

	return 0;
}
Code Example #21
File: export.cpp Project: ohgodhowdidthis/trance
H264Exporter::H264Exporter(const exporter_settings& settings)
: _success{false}
, _settings(settings)
, _file{settings.path, std::ios::binary}
, _frame{0}
, _encoder{nullptr}
{
  x264_param_t param;
  // Best quality (0) -> "veryslow" (8); worst quality (4) -> "ultrafast" (0).
  auto quality = std::to_string(2 * (4 - settings.quality));
  if (x264_param_default_preset(&param, quality.c_str(), "film") < 0) {
    std::cerr << "couldn't get default preset" << std::endl;
    return;
  }
  param.i_threads = settings.threads > 1 ? settings.threads - 1 : 1;
  param.i_lookahead_threads = settings.threads > 1 ? 1 : 0;

  param.i_width = settings.width;
  param.i_height = settings.height;
  param.i_fps_num = settings.fps;
  param.i_fps_den = 1;
  param.i_frame_total = settings.fps * settings.length;
  param.i_keyint_min = 0;
  param.i_keyint_max = settings.fps;
  if (x264_param_apply_profile(&param, "high") < 0) {
    std::cerr << "couldn't apply profile" << std::endl;
    return;
  }

  _encoder = x264_encoder_open(&param);
  if (!_encoder) {
    std::cerr << "couldn't create encoder" << std::endl;
    return;
  }
  if (x264_picture_alloc(&_pic, X264_CSP_I420, _settings.width, _settings.height) < 0) {
    std::cerr << "couldn't allocate picture" << std::endl;
    return;
  }
  _success = true;
}
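
The constructor only sets up the encoder and the picture; a hedged sketch of the per-frame step it implies follows (the encode_frame name and error handling are assumptions; _pic, _frame, _encoder and _file are the members initialised above):

// Hypothetical per-frame step: encode the YUV data already copied into _pic
// and append the resulting Annex-B NAL units to the output file.
bool H264Exporter::encode_frame()
{
  x264_picture_t pic_out;
  x264_nal_t* nals = nullptr;
  int num_nals = 0;
  _pic.i_pts = _frame++;
  int size = x264_encoder_encode(_encoder, &nals, &num_nals, &_pic, &pic_out);
  if (size < 0) {
    return false;
  }
  if (size > 0) {
    // The NAL payloads are contiguous, so one write covers the whole frame.
    _file.write(reinterpret_cast<const char*>(nals[0].p_payload), size);
  }
  return true;
}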
Code Example #22
ImageManagerComplex::ImageManagerComplex(int Width, int Height) : width(Width), height(Height), size(width *height * 4)
{


   //id = shmget(IPC_PRIVATE , width * height * 4, IPC_CREAT | 0777);
   //pointerToBig = (char*) shmat(id,0,0);
   imageFd = eventfd(numOfImages, EFD_SEMAPHORE);
   imageUsed = numOfImages;

   for (int i = 0; i < numOfImages; i++)
   {
      StoredImage storage;

      storage.image = std::shared_ptr<ImageType>(new ImageType(),killSharedMemory);
      storage.image->shmid = shmget(IPC_PRIVATE , width * height * 4, IPC_CREAT | 0777);
      storage.image->shmaddr = (unsigned char *) shmat(storage.image->shmid,0,0);
      storage.isUsed = false;

      storedImages.push_back(storage);
   }

   convertedImageFd = eventfd(numOfConvertedImages, EFD_SEMAPHORE);
   convertedImageUsed = numOfConvertedImages;

   for (int i = 0; i < numOfConvertedImages; i++)
   {
      StoredConvertedImage storage;


      std::shared_ptr<ConvertedImage> pic = std::shared_ptr<ConvertedImage>(new ConvertedImage(), killPicture);
      x264_picture_alloc(pic.get(),X264_CSP_I420, width, height);

      storage.image= pic;
      storage.isUsed = false;

      storedConvertedImages.push_back(storage);
   }

}
Code Example #23
File: H264Android.c Project: chenxiaofa/Time-lapse
jlong Java_h264_com_H264Encoder_CompressBegin(JNIEnv* env, jobject thiz,
		jint width, jint height) {
	Encoder * en = (Encoder *) malloc(sizeof(Encoder));
	en->param = (x264_param_t *) malloc(sizeof(x264_param_t));
	en->picture = (x264_picture_t *) malloc(sizeof(x264_picture_t));
	x264_param_default(en->param); //set default param
	x264_param_apply_profile(en->param,"baseline");
	//en->param->rc.i_rc_method = X264_RC_CQP;
	en->param->i_log_level = X264_LOG_NONE;
	en->param->i_width = width; //set frame width
	en->param->i_height = height; //set frame height
	en->param->rc.i_lookahead =0;

	en->param->i_fps_num =5;
	en->param->i_fps_den = 1;
	if ((en->handle = x264_encoder_open(en->param)) == 0) {
		return 0;
	}
	/* Create a new pic */
	x264_picture_alloc(en->picture, X264_CSP_I420, en->param->i_width,
			en->param->i_height);
	return (jlong) en;
}
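
Only the begin half of the JNI pair is shown; a matching CompressEnd counterpart might look like the sketch below (the name and signature follow the CompressBegin pattern but are assumptions here):

// Hypothetical teardown matching CompressBegin above.
jint Java_h264_com_H264Encoder_CompressEnd(JNIEnv* env, jobject thiz, jlong handle) {
	Encoder *en = (Encoder *) (intptr_t) handle;
	if (en == NULL)
		return 0;
	if (en->picture) {
		x264_picture_clean(en->picture);  /* planes from x264_picture_alloc */
		free(en->picture);
	}
	if (en->param)
		free(en->param);
	if (en->handle)
		x264_encoder_close(en->handle);
	free(en);
	return 1;
}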
Code Example #24
File: yuv2h264.c Project: Chely400/x264-example
int main(int argc, char* argv[]){
    x264_param_t param;
    x264_t *h = NULL;
    x264_picture_t pic_in;
    x264_picture_t pic_out;
    x264_nal_t *nal;
    uint8_t *data = NULL;
    int widthXheight = width * height;
    int frame_size = width * height * 1.5;
    int read_sum = 0, write_sum = 0;
    int frames = 0;
    int i, rnum, i_size;
    x264_nal_t* pNals = NULL;

    x264_param_default(&param);
    param.i_width = width;
    param.i_height = height;
    param.i_bframe = 3;
    param.i_fps_num = 25;
    param.i_fps_den = 1;
    param.b_vfr_input = 0;
    param.i_keyint_max = 250;
    param.rc.i_bitrate = 1500;
    param.i_scenecut_threshold = 40;
    param.i_level_idc = 51;

    x264_param_apply_profile(&param, "high");

    h = x264_encoder_open( &param );

//    printf("param.rc.i_qp_min=%d, param.rc.i_qp_max=%d, param.rc.i_qp_step=%d param.rc.i_qp_constant=%d param.rc.i_rc_method=%d\n",
//            param.rc.i_qp_min, param.rc.i_qp_max, param.rc.i_qp_step, param.rc.i_qp_constant, param.rc.i_rc_method);
    printf("param:%s\n", x264_param2string(&param, 1));


    x264_picture_init( &pic_in );
    x264_picture_alloc(&pic_in, X264_CSP_YV12, width, height);
    pic_in.img.i_csp = X264_CSP_YV12;
    pic_in.img.i_plane = 3;

    data = (uint8_t*)malloc(0x400000);

    FILE* fpr = fopen(MFILE ".yuv", "rb");
    FILE* fpw1 = fopen(MFILE".szhu.h264", "wb");
//    FILE* fpw2 = fopen(MFILE".h264", "wb");

    if(!fpr || !fpw1 ) {
        printf("file open failed\n");
        return -1;
    }

    while(!feof(fpr)){
        rnum = fread(data, 1, frame_size, fpr);
        if(rnum != frame_size){
            printf("read file failed\n");
            break;
        }
        memcpy(pic_in.img.plane[0], data, widthXheight);
        memcpy(pic_in.img.plane[1], data + widthXheight, widthXheight >> 2);
        memcpy(pic_in.img.plane[2], data + widthXheight + (widthXheight >> 2), widthXheight >> 2);
        read_sum += rnum;
        frames ++;
//        printf("read frames=%d %.2fMB write:%.2fMB\n", frames, read_sum * 1.0 / 0x100000, write_sum * 1.0 / 0x100000);
        int i_nal;
        int i_frame_size = 0;

        if(0 && frames % 12 == 0){
            pic_in.i_type = X264_TYPE_I;
        }else{
            pic_in.i_type = X264_TYPE_AUTO;
        }
        i_frame_size = x264_encoder_encode( h, &nal, &i_nal, &pic_in, &pic_out );

        if(i_frame_size <= 0){
            //printf("\t!!!FAILED encode frame \n");
        }else{
            fwrite(nal[0].p_payload, 1, i_frame_size, fpw1);
//            printf("\t+++i_frame_size=%d\n", i_frame_size);
            write_sum += i_frame_size;
        }
#if 0
        for(i = 0; i < i_nal; i ++){
            i_size = nal[i].i_payload;
//            fwrite(nal[i].p_payload, 1, nal[i].i_payload, fpw1);
            fwrite(nal[i].p_payload, 1, i_frame_size, fpw1);
            x264_nal_encode(h, data, &nal[i]);
            if(i_size != nal[i].i_payload){
                printf("\t\ti_size=%d nal[i].i_payload=%d\n", i_size, nal[i].i_payload);
            }
            fwrite(data, 1, nal[i].i_payload, fpw2);
        }
#endif
    }

    free(data);
    x264_picture_clean(&pic_in);
    x264_picture_clean(&pic_out);
    if(h){
        x264_encoder_close(h);
        h = NULL;
    }
    fclose(fpw1);
//    fclose(fpw2);
    fclose(fpr);
    printf("h=0x%X", h);
    return 0;
}
Code Example #25
void EncoderVideoSource::H264_doGetNextFrame()
{
#ifdef SDKH264 
	int size = (fWidth*fHeight *3/2);
	int videoType;

	Debug(ckite_log_message, "EncoderVideoSource::H264_doGetNextFrame ENTRY\n");
	Debug(ckite_log_message, "fMaxSize = %d\n", fMaxSize);
	if (fp == NULL)
	{
		Debug(ckite_log_message, "video fp is NULL\n");
		return;
	}
	// handle per of nal
	for(int i = 0; i < 4; i++)
	{
		if(more_nal[i] != NULL)
		{
			Debug(ckite_log_message, "more_nal address %p\n", more_nal[i]);
			Debug(ckite_log_message, "more_nal len  %d\n", more_nal_len[i]);
			memcpy(fTo, more_nal[i], more_nal_len[i]);
			fFrameSize = more_nal_len[i];
			if(more_nal[i] != NULL)
			{
				delete [] more_nal[i];
				more_nal[i] = NULL;
				more_nal_len[i] = 0;
			}
			fPictureEndMarker = True;
			afterGetting(this);
			return ;
		}
	}
	computePresentationTime();
	if (strcmp(mediaType, "store") == 0)	
	{
		if (fWidth == 720 && fHeight == 576)
		{
			videoType = getLivehdFrame();
		}
		else
		{
//			getFileVideoFrame( fp, 0, (unsigned char *)fBuffer, &size, &videoType, false);
		}
	}
	else
	{
		if (fWidth == 720 && fHeight == 576)
		{
			videoType = getLivehdFrame();
		}
		else
		{
			videoGetFrameInfo(fChannel, fp, mediaType, fBuffer, &size, &videoType);
		}
	}
	if(size <= 0) return ;
	if (videoType == VIDEO_MPEG4 || videoType == VIDEO_H264)
	{
		fFrameSize = size;
	}
	else if (videoType == VIDEO_RAW)
	{
		if( x264_picture_alloc(&m_pic, m_param.i_csp, m_param.i_width, m_param.i_height) < 0)
		{
			Debug(ckite_log_message, "x264_picture_alloc is failed \n");
			return;
		}
		memcpy(m_pic.img.plane[0], fBuffer, m_param.i_width * m_param.i_height);
		memcpy(m_pic.img.plane[1], fBuffer + m_param.i_width * m_param.i_height, m_param.i_width * m_param.i_height / 4);
		memcpy(m_pic.img.plane[2], fBuffer + m_param.i_width * m_param.i_height * 5 / 4, m_param.i_width * m_param.i_height / 4);   

		static x264_picture_t pic_out;
		x264_nal_t *nal = NULL;
		int i_nal, i;

		if(x264_handle != NULL)
		{
			if( x264_encoder_encode( x264_handle, &nal, &i_nal, &m_pic, &pic_out ) < 0 )
			{
				return;
			}
		}
		int offset = 0;
		static int t = 0;
		FILE *fout;

		//unsigned char nal_type;
		Debug(ckite_log_message, "i_nal = %d\n", i_nal);
		for ( i = 0; i < i_nal; i++ )
		{
			if (t < 4)
			{
				char name[100] = {0};
				t++;
				snprintf(name, sizeof name, "nal%d.dat", t);
				fout = fopen(name, "wb+");
				size = fwrite(nal[i].p_payload,1,nal[i].i_payload,fout);
				fclose(fout);
				Debug(ckite_log_message, "size = %d\n",size);

			}
			if(nal[i].p_payload[2] == 1)
			{
				offset = 3;
				//nal_type = nal[i].p_payload[3];
			}
			else if (nal[i].p_payload[3] == 1)
			{
				offset = 4;
				//nal_type = nal[i].p_payload[4];
			}
			if(i >= 1)
			{
				if(more_nal[i-1] == NULL)
				{
					more_nal_len[i-1] = nal[i].i_payload - offset;
					more_nal[i-1] = new char [more_nal_len[i-1] + 1];
					if (more_nal[i-1] != NULL)
					{
						memset(more_nal[i-1], 0x0, nal[i].i_payload - offset + 1);
						memcpy(more_nal[i-1], nal[i].p_payload + offset, nal[i].i_payload - offset);
						//Debug(ckite_log_message, "new success more_nal[%d], nal size %d\n", i-1, more_nal_len[i-1]);
					}
					else
					{
						Debug(ckite_log_message, "new failed with %d nal\n", i);
					}
				}
			}
			else 
			{
				memcpy(fTo, nal[i].p_payload + offset, nal[i].i_payload - offset);
				fFrameSize = nal[i].i_payload - offset;
			}
		}
	}
	//Debug(ckite_log_message, "Deliver nal type %d with %d bytes.\n", nal_type, fFrameSize);
	fPictureEndMarker = True;
	afterGetting(this);
	x264_picture_clean(&m_pic);
#endif

}
Code Example #26
File: qth264.c Project: Cuchulain/cinelerra
static int encode(quicktime_t *file, unsigned char **row_pointers, int track)
{
	int64_t offset = quicktime_position(file);
	quicktime_video_map_t *vtrack = &(file->vtracks[track]);
	quicktime_h264_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
	quicktime_trak_t *trak = vtrack->track;
	int width = quicktime_video_width(file, track);
	int height = quicktime_video_height(file, track);
	int w_2 = quicktime_quantize2(width);
// ffmpeg interprets the codec height as the presentation height
	int h_2 = quicktime_quantize2(height);
	int i;
	int result = 0;
	int bytes = 0;
	int is_keyframe = 0;
	int current_field = vtrack->current_position % codec->total_fields;
	quicktime_atom_t chunk_atom;
	unsigned char header[1024];
	int header_size = 0;
	int got_pps = 0;
	int got_sps = 0;
	quicktime_avcc_t *avcc = &trak->mdia.minf.stbl.stsd.table[0].avcc;






	pthread_mutex_lock(&h264_lock);

	if(!codec->encode_initialized[current_field])
	{
		codec->encode_initialized[current_field] = 1;
		codec->param.i_width = w_2;
		codec->param.i_height = h_2;
		codec->param.i_fps_num = quicktime_frame_rate_n(file, track);
		codec->param.i_fps_den = quicktime_frame_rate_d(file, track);

#if X264_BUILD >= 48
		codec->param.rc.i_rc_method = X264_RC_CQP;
#endif
// Reset quantizer if fixed bitrate
		x264_param_t default_params;
		x264_param_default(&default_params);
#if X264_BUILD < 48
		if(codec->param.rc.b_cbr)
#else
		if(codec->param.rc.i_qp_constant)
#endif
		{
			codec->param.rc.i_qp_constant = default_params.rc.i_qp_constant;
			codec->param.rc.i_qp_min = default_params.rc.i_qp_min;
			codec->param.rc.i_qp_max = default_params.rc.i_qp_max;
		}


		if(file->cpus > 1)
		{
			codec->param.i_threads = file->cpus;
		}

		codec->encoder[current_field] = x264_encoder_open(&codec->param);
		codec->pic[current_field] = calloc(1, sizeof(x264_picture_t));
//printf("encode 1 %d %d\n", codec->param.i_width, codec->param.i_height);
  		x264_picture_alloc(codec->pic[current_field],
			X264_CSP_I420,
			codec->param.i_width,
			codec->param.i_height);
	}






	codec->pic[current_field]->i_type = X264_TYPE_AUTO;
	codec->pic[current_field]->i_qpplus1 = 0;


	if(codec->header_only)
	{
		bzero(codec->pic[current_field]->img.plane[0], w_2 * h_2);
		bzero(codec->pic[current_field]->img.plane[1], w_2 * h_2 / 4);
		bzero(codec->pic[current_field]->img.plane[2], w_2 * h_2 / 4);
	}
	else
	if(file->color_model == BC_YUV420P)
	{
		memcpy(codec->pic[current_field]->img.plane[0], row_pointers[0], w_2 * h_2);
		memcpy(codec->pic[current_field]->img.plane[1], row_pointers[1], w_2 * h_2 / 4);
		memcpy(codec->pic[current_field]->img.plane[2], row_pointers[2], w_2 * h_2 / 4);
	}
	else
	{
//printf("encode 2 %p %p %p\n", codec->pic[current_field]->img.plane[0], codec->pic[current_field]->img.plane[1], codec->pic[current_field]->img.plane[2]);
		cmodel_transfer(0, /* Leave NULL if non existent */
			row_pointers,
			codec->pic[current_field]->img.plane[0], /* Leave NULL if non existent */
			codec->pic[current_field]->img.plane[1],
			codec->pic[current_field]->img.plane[2],
			row_pointers[0], /* Leave NULL if non existent */
			row_pointers[1],
			row_pointers[2],
			0,        /* Dimensions to capture from input frame */
			0,
			width,
			height,
			0,       /* Dimensions to project on output frame */
			0,
			width,
			height,
			file->color_model,
			BC_YUV420P,
			0,         /* When transfering BC_RGBA8888 to non-alpha this is the background color in 0xRRGGBB hex */
			width,       /* For planar use the luma rowspan */
			codec->pic[current_field]->img.i_stride[0]);

	}












    x264_picture_t pic_out;
    x264_nal_t *nals;
	int nnal = 0;
	do
	{
		x264_encoder_encode(codec->encoder[current_field],
			&nals,
			&nnal,
			codec->pic[current_field],
			&pic_out);
//printf("encode %d nnal=%d\n", __LINE__, nnal);
	} while(codec->header_only && !nnal);
	int allocation = w_2 * h_2 * 3;
	if(!codec->work_buffer)
	{
		codec->work_buffer = calloc(1, allocation);
	}

	codec->buffer_size = 0;
//printf("encode %d nnal=%d\n", __LINE__, nnal);
	for(i = 0; i < nnal; i++)
	{
#if X264_BUILD >= 76
                int size = nals[i].i_payload;
                memcpy(codec->work_buffer + codec->buffer_size,
			nals[i].p_payload,
			nals[i].i_payload);
#else
		int size_return = 0;
		int size = x264_nal_encode(codec->work_buffer + codec->buffer_size,
			&size_return,
			1,
			nals + i);
#endif
		unsigned char *ptr = codec->work_buffer + codec->buffer_size;

//printf("encode %d size=%d\n", __LINE__, size);
		if(size > 0)
		{
			if(size + codec->buffer_size > allocation)
			{
				printf("qth264.c %d: overflow size=%d allocation=%d\n",
					__LINE__,
					size,
					allocation);
			}

// Size of NAL for avc
			uint64_t avc_size = size - 4;

// Synthesize header.
// Hopefully all the parameter set NAL's are present in the first frame.
			if(!avcc->data_size)
			{
				if(header_size < 6)
				{
					header[header_size++] = 0x01;
					header[header_size++] = 0x4d;
					header[header_size++] = 0x40;
					header[header_size++] = 0x1f;
					header[header_size++] = 0xff;
					header[header_size++] = 0xe1;
				}

				int nal_type = (ptr[4] & 0x1f);
// Picture parameter or sequence parameter set
				if(nal_type == 0x7 && !got_sps)
				{
					got_sps = 1;
					header[header_size++] = (avc_size & 0xff00) >> 8;
					header[header_size++] = (avc_size & 0xff);
					memcpy(&header[header_size],
						ptr + 4,
						avc_size);
					header_size += avc_size;
				}
				else
				if(nal_type == 0x8 && !got_pps)
				{
					got_pps = 1;
// Number of pps nal's.
					header[header_size++] = 0x1;
					header[header_size++] = (avc_size & 0xff00) >> 8;
					header[header_size++] = (avc_size & 0xff);
					memcpy(&header[header_size],
						ptr + 4,
						avc_size);
					header_size += avc_size;
				}

// Write header
				if(got_sps && got_pps)
				{
/*
 * printf("encode %d\n", __LINE__);
 * int j;
 * for(j = 0; j < header_size; j++)
 * {
 * printf("%02x ", header[j]);
 * }
 * printf("\n");
 */
					quicktime_set_avcc_header(avcc,
		  				header,
		  				header_size);
				}
			}
Code Example #27
jlong Java_com_H264_H264Encoder_CompressBegin(JNIEnv* env,jobject thiz,
												jint width,jint height,
												jint FrameRate,
												jbyteArray filename){
	en = (Encoder *) malloc(sizeof(Encoder));
	en->param = (x264_param_t *) malloc(sizeof(x264_param_t));
	en->picture = (x264_picture_t *) malloc(sizeof(x264_picture_t));

	opt = (cli_opt_t *)malloc(sizeof(cli_opt_t));


	//test
	nalcount=0;
	last_pts = 0;
	i_frame= 0;
	//test

    x264_nal_t *headers;
    int i_nal;

	jbyte * fname = (jbyte*)(*env)->GetByteArrayElements(env, filename, 0);
	mp4_output.open_file( fname, &opt->hout, &output_opt );




 	x264_param_default(en->param); // default param
	 en->param->i_log_level = X264_LOG_NONE;
	 en->param->i_width = width; // frame width
	 en->param->i_height = height; // frame height
	 en->param->rc.i_lookahead =0;
	 en->param->i_bframe=0;
	 en->param->i_fps_num =FrameRate;
	 en->param->i_fps_den = 1;
	 en->param->i_frame_reference=5;
	 en->param->i_bframe_adaptive=1;




 	 en->param->b_vfr_input=1;
 	 en->param->i_timebase_num = 1;
 	 en->param->i_timebase_den = FrameRate;



 	en->param->i_csp =X264_CSP_I420;
 	en->param->analyse.b_psnr = 1;
 	en->param->analyse.b_ssim = 1;


 	int frames = 0;
 	en->param->i_frame_total = 0;

///////

	// Intra refres:
 	en->param->i_keyint_max = 30;
 	en->param->b_intra_refresh = 1;
	//Rate control:
 	en->param->rc.f_rf_constant = 25;
 	en->param->rc.f_rf_constant_max = 35;
	//For streaming:
 	en->param->b_repeat_headers = 0;
 	en->param->b_annexb = 0;


 	///////

 	uv = en->param->i_width * en->param->i_height;

	 if ((en->handle = x264_encoder_open(en->param)) == 0) {
		 return 0;
	 }
	 x264_encoder_parameters( en->handle, en->param );
	 /* Create a new pic */
	 x264_picture_alloc(en->picture, X264_CSP_I420, en->param->i_width,
	 en->param->i_height);

	 mp4_output.set_param(opt->hout,en->param);

	 ticks_per_frame = (int64_t)en->param->i_timebase_den * en->param->i_fps_den / en->param->i_timebase_num / en->param->i_fps_num;
	 ticks_per_frame = X264_MAX( ticks_per_frame, 1 );

	 __android_log_print(ANDROID_LOG_INFO, "H264Encoder native", "ticks_per_frame:%d",ticks_per_frame);

	 if(x264_encoder_headers( en->handle, &headers, &i_nal)<0)
		 ;
	 __android_log_print(ANDROID_LOG_INFO, "H264Encoder native", "encoder header:%d",i_nal);
	 mp4_output.write_headers(opt->hout, headers);
	 (*env)->ReleaseByteArrayElements(env,filename,fname,0);
	 return (jlong) en; 
}
Code Example #28
File: server.c Project: freesnail/encoder
int main(int argc, char** argv)
{
  int listenfd, connfd;
  int byterecv;
  int bytesum = 0;
  int ret = 0;
  struct sockaddr_in servaddr;
  char *buff;
  int width, height;
  x264_param_t param;
  x264_picture_t pic;
  x264_picture_t pic_out;
  x264_t *h;
  int i_frame = 0;
  int i_frame_size;
  x264_nal_t *nal;
  int i_nal;
  FILE *fout = NULL;

   /* Get default params for preset/tuning */
  if(x264_param_default_preset( &param, "medium", NULL ) < 0)
    goto fail;

  /* Configure non-default params */
  param.i_csp = X264_CSP_I420;
  param.i_width  = WIDTH;
  param.i_height = HEIGHT;
  param.b_vfr_input = 0; //frame rate
  param.b_repeat_headers = 1;
  param.b_annexb = 1;

  /* Apply profile restrictions. */
  if(x264_param_apply_profile(&param, "baseline") < 0)
    goto fail;

  if(x264_picture_alloc(&pic, param.i_csp, param.i_width, param.i_height) < 0)
    goto fail;

#undef fail
#define fail fail2

  h = x264_encoder_open(&param);
  if(!h)
    goto fail;
  printf("x264 encoder init successfully\n");

  int luma_size = param.i_width * param.i_height;
  int chroma_size = luma_size / 4;
  fout = fopen("test.264", "wb+");

#undef fail
#define fail fail3

  /* socket init*/
  if((listenfd = socket(AF_INET, SOCK_STREAM, 0)) == -1){
    printf("create socket error: %s(errno: %d)\n",strerror(errno),errno);
    exit(0);
  }

  memset(&servaddr, 0, sizeof(servaddr));
  servaddr.sin_family = AF_INET;
  servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
  servaddr.sin_port = htons(6666);

  if(bind(listenfd, (struct sockaddr*)&servaddr, sizeof(servaddr)) == -1){
    printf("bind socket error: %s(errno: %d)\n",strerror(errno),errno);
    exit(0);
  }

  if(listen(listenfd, 10) == -1){
    printf("listen socket error: %s(errno: %d)\n",strerror(errno),errno);
    exit(0);
  }

  printf("======waiting for client's request======\n");
  if((connfd = accept(listenfd, (struct sockaddr*)NULL, NULL)) == -1){
    printf("accept socket error: %s(errno: %d)",strerror(errno),errno);
    exit(0);
  }

  /* allocate a 5 MB buffer for receiving raw data from the client */
  buff = (char *)malloc(5 * 1024 * 1024 * sizeof(char));
  while(1) {
    byterecv = recv(connfd, buff + bytesum, 32 * 1024, 0);
    if (byterecv > 0) {
      bytesum += byterecv;
      /* got one frame here, encode it*/
      if (bytesum >= RESOLUTION * 1.5) {
        pic.img.plane[0] = buff;
        pic.img.plane[1] = buff + luma_size;
        pic.img.plane[2] = buff + luma_size + chroma_size;
        pic.i_pts = i_frame;
        i_frame ++;

        i_frame_size = x264_encoder_encode(h, &nal, &i_nal, &pic, &pic_out);
        if(i_frame_size < 0)
          goto fail;
        else if(i_frame_size) {
          if(!fwrite( nal->p_payload, i_frame_size, 1, fout))
            goto fail;
          printf("encode frame %d\n", i_frame);
        }
        bytesum = bytesum - RESOLUTION * 1.5;
        memcpy(buff, buff + (int)(RESOLUTION * 1.5), bytesum);
      }
    }
    else if((byterecv < 0) && (errno == EAGAIN || errno == EINTR)) {
      printf("continue\n");
      continue;
    }
    else {
      //printf("stop recv stream frome client, bytesum: %d, total: %d\n", bytesum, total);
      break;
    }
  }

  /* Flush delayed frames */
  while(x264_encoder_delayed_frames(h)) {
    i_frame_size = x264_encoder_encode(h, &nal, &i_nal, NULL, &pic_out);
    if(i_frame_size < 0)
      goto fail;
    else if(i_frame_size) {
      if(!fwrite( nal->p_payload, i_frame_size, 1, fout))
        goto fail;
    }
  }

  if (fout)
    fclose(fout);

  x264_encoder_close(h);
  x264_picture_clean(&pic);

  close(connfd);
  close(listenfd);
  return 0;

#undef fail
fail3:
  x264_encoder_close(h);
fail2:
  x264_picture_clean(&pic);
fail:
  return -1;
}
Code Example #29
File: x264.c Project: clzhan/x264-vs2008
/*****************************************************************************
 * Encode:
 *****************************************************************************/
static int  Encode( x264_param_t *param, cli_opt_t *opt )
{
    x264_t *h;
    x264_picture_t pic;

    int     i_frame, i_frame_total;
    int64_t i_start, i_end;
    int64_t i_file;
    int     i_frame_size;
    int     i_progress;

    i_frame_total = p_get_frame_total( opt->hin );
    i_frame_total -= opt->i_seek;
    if( ( i_frame_total == 0 || param->i_frame_total < i_frame_total )
        && param->i_frame_total > 0 )
        i_frame_total = param->i_frame_total;
    param->i_frame_total = i_frame_total;

    if( ( h = x264_encoder_open( param ) ) == NULL )
    {
        fprintf( stderr, "x264_encoder_open failed\n" );
        p_close_infile( opt->hin );
        p_close_outfile( opt->hout );
        return -1;
    }

    if( p_set_outfile_param( opt->hout, param ) )
    {
        fprintf( stderr, "can't set outfile param\n" );
        p_close_infile( opt->hin );
        p_close_outfile( opt->hout );
        return -1;
    }

    /* Create a new pic */
    x264_picture_alloc( &pic, X264_CSP_I420, param->i_width, param->i_height );

    i_start = x264_mdate();

    /* Encode frames */
    for( i_frame = 0, i_file = 0, i_progress = 0;
         b_ctrl_c == 0 && (i_frame < i_frame_total || i_frame_total == 0); )
    {
        if( p_read_frame( &pic, opt->hin, i_frame + opt->i_seek ) )
            break;

        pic.i_pts = (int64_t)i_frame * param->i_fps_den;

        if( opt->qpfile )
            parse_qpfile( opt, &pic, i_frame + opt->i_seek );
        else
        {
            /* Do not force any parameters */
            pic.i_type = X264_TYPE_AUTO;
            pic.i_qpplus1 = 0;
        }

        i_file += Encode_frame( h, opt->hout, &pic );

        i_frame++;

        /* update status line (up to 1000 times per input file) */
        if( opt->b_progress && param->i_log_level < X264_LOG_DEBUG &&
            ( i_frame_total ? i_frame * 1000 / i_frame_total > i_progress : i_frame % 10 == 0 ) )
        {
            int64_t i_elapsed = x264_mdate() - i_start;
            double fps = i_elapsed > 0 ? i_frame * 1000000. / i_elapsed : 0;
            
			if( i_frame_total )
            {
                int eta = i_elapsed * (i_frame_total - i_frame) / ((int64_t)i_frame * 1000000);
                i_progress = i_frame * 1000 / i_frame_total;
                fprintf( stderr, "encoded frames: %d/%d (%.1f%%), %.2f fps, eta %d:%02d:%02d  \r",
                         i_frame, i_frame_total, (float)i_progress / 10, fps,
                         eta/3600, (eta/60)%60, eta%60 );
            }
            else
                fprintf( stderr, "encoded frames: %d, %.2f fps   \r", i_frame, fps );
            
			fflush( stderr ); // needed in windows
        }
    }
    /* Flush delayed B-frames */
    do 
	{
        i_file += i_frame_size = Encode_frame( h, opt->hout, NULL );
    } while( i_frame_size );

    i_end = x264_mdate();
    x264_picture_clean( &pic );
    x264_encoder_close( h );
    fprintf( stderr, "\n" );

    if( b_ctrl_c )
        fprintf( stderr, "aborted at input frame %d\n", opt->i_seek + i_frame );

    p_close_infile( opt->hin );
    p_close_outfile( opt->hout );

    if( i_frame > 0 )
    {
        double fps = (double)i_frame * (double)1000000 /
                     (double)( i_end - i_start );

        fprintf( stderr, "encoded %d frames, %.2f fps, %.2f kb/s\n", i_frame, fps,
                 (double) i_file * 8 * param->i_fps_num /
                 ( (double) param->i_fps_den * i_frame * 1000 ) );
    }

    return 0;
}
Code Example #30
gpointer
x264_gtk_encode_encode (X264_Thread_Data *thread_data)
{
  GIOStatus       status;
  gsize           size;
  X264_Pipe_Data  pipe_data;
  x264_param_t   *param;
  x264_picture_t  pic;
  x264_t         *h;
  hnd_t           hin;
  hnd_t           hout;
  int             i_frame;
  int             i_frame_total;
  int64_t         i_start;
  int64_t         i_end;
  int64_t         i_file;
  int             i_frame_size;
  int             i_progress;
  int             err;

  g_print (_("encoding...\n"));
  param = thread_data->param;
  err = x264_set_drivers (thread_data->in_container, thread_data->out_container);
  if (err < 0) {
    GtkWidget *no_driver;
    no_driver = gtk_message_dialog_new (GTK_WINDOW(thread_data->dialog),
                                        GTK_DIALOG_DESTROY_WITH_PARENT,
                                        GTK_MESSAGE_ERROR,
                                        GTK_BUTTONS_CLOSE,
                                        (err == -2) ? _("Error: unknown output file type")
                                                    : _("Error: unknown input file type"));
    gtk_dialog_run (GTK_DIALOG (no_driver));
    gtk_widget_destroy (no_driver);
    return NULL;
  }

  if (p_open_infile (thread_data->file_input, &hin, param)) {
    fprintf( stderr, _("could not open input file '%s'\n"), thread_data->file_input );
    return NULL;
  }

  p_open_outfile ((char *)thread_data->file_output, &hout);

  i_frame_total = p_get_frame_total (hin );
  if (((i_frame_total == 0) || (param->i_frame_total < i_frame_total)) &&
      (param->i_frame_total > 0))
    i_frame_total = param->i_frame_total;
  param->i_frame_total = i_frame_total;

  if ((h = x264_encoder_open (param)) == NULL)
    {
      fprintf (stderr, _("x264_encoder_open failed\n"));
      p_close_infile (hin);
      p_close_outfile (hout);
      g_free (param);

      return NULL;
    }

  if (p_set_outfile_param (hout, param))
    {
      fprintf (stderr, _("can't set outfile param\n"));
      p_close_infile (hin);
      p_close_outfile (hout);
      g_free (param);

      return NULL;
    }

  /* Create a new pic */
  x264_picture_alloc (&pic, X264_CSP_I420, param->i_width, param->i_height );

  i_start = x264_mdate();

  /* Encode frames */
  for (i_frame = 0, i_file = 0, i_progress = 0;
       ((i_frame < i_frame_total) || (i_frame_total == 0)); )
    {
      if (p_read_frame (&pic, hin, i_frame))
        break;

      pic.i_pts = (int64_t)i_frame * param->i_fps_den;

      i_file += x264_encode_frame (h, hout, &pic);

      i_frame++;

      /* update status line (up to 1000 times per input file) */
      if (param->i_log_level < X264_LOG_DEBUG &&
          (i_frame_total ? i_frame * 1000 / i_frame_total > i_progress
           : i_frame % 10 == 0))
        {
          int64_t i_elapsed = x264_mdate () - i_start;

          if (i_frame_total)
            {
              pipe_data.frame = i_frame;
              pipe_data.frame_total = i_frame_total;
              pipe_data.file = i_file;
              pipe_data.elapsed = i_elapsed;
              status = g_io_channel_write_chars (thread_data->io_write,
                                                 (const gchar *)&pipe_data,
                                                 sizeof (X264_Pipe_Data),
                                                 &size, NULL);
              if (status != G_IO_STATUS_NORMAL) {
                g_print (_("Error ! %d %d %d\n"), status, (int)sizeof (X264_Pipe_Data), (int)size);
              }
              else {
                /* we force the GIOChannel to write to the pipeline */
                status = g_io_channel_flush (thread_data->io_write,
                                             NULL);
                if (status != G_IO_STATUS_NORMAL) {
                  g_print (_("Error ! %d\n"), status);
                }
              }
            }
        }
    }
  /* Flush delayed B-frames */
  do {
    i_file += i_frame_size = x264_encode_frame (h, hout, NULL);
  } while (i_frame_size);

  i_end = x264_mdate ();
  x264_picture_clean (&pic);
  x264_encoder_close (h);
  fprintf (stderr, "\n");

  p_close_infile (hin);
  p_close_outfile (hout);

  if (i_frame > 0) {
    double fps = (double)i_frame * (double)1000000 /
      (double)(i_end - i_start);

    fprintf (stderr, _("encoded %d frames, %.2f fps, %.2f kb/s\n"),
             i_frame, fps,
             (double) i_file * 8 * param->i_fps_num /
             ((double) param->i_fps_den * i_frame * 1000));
  }

  gtk_widget_set_sensitive (thread_data->end_button, TRUE);
  gtk_widget_hide (thread_data->button);
  return NULL;
}