Code Example #1
File: capenc.c Project: ricann/videoctd
// ricann todo, support other pixel format
int encode_frame(uint8_t *in, mmap_node_t *node, int *keyframe)
{
	int i = 0;
	int n_nal = -1;
	int result = 0;
	uint8_t *p_out = node->buf;
	x264_picture_t pic_out;

	if(capg.pixelfmt == V4L2_PIX_FMT_YUYV)
		encode_yuyv(in);
	else if(capg.pixelfmt == V4L2_PIX_FMT_YUV420)
		encode_yuv420(in);
	else
		CAP_DBG_EXIT("pixelfmt not support\n");

	x264_encode.pic->i_pts++;
	if(x264_encoder_encode(x264_encode.handle, &x264_encode.nal,
			&n_nal, x264_encode.pic, &pic_out) < 0)
		CAP_DBG_EXIT("x264_encoder_encode error!\n");

	*keyframe = pic_out.i_type==X264_TYPE_IDR;

	for(i = 0; i < n_nal; i++ ) {
		memcpy(p_out, x264_encode.nal[i].p_payload, x264_encode.nal[i].i_payload);
		p_out += x264_encode.nal[i].i_payload;
		result += x264_encode.nal[i].i_payload;
	}

	node->length = result;

	// ricann debug
	CAP_DBG("frame len: %d, key frame: %d\n", result, *keyframe);

	return result;
}
Code Example #2
File: encoder.c Project: suonikeyinsuxiao/trunk
void flushEncoder(ENCODER_S* pcEncoder)
{
	x264_picture_t picOut;
	int i = 0;
	int nRet = 0;

	while(1)
	{
		nRet = x264_encoder_encode(pcEncoder->m_psHandle, &pcEncoder->m_psNal, &(pcEncoder->m_nNal), NULL, &picOut);
		if (0 > nRet)
		{
			fprintf(stderr, "[%s, %d] flush encoder x264_encoder_encode failed:%s", __func__, __LINE__, strerror(errno));		
			unInitEncoder(pcEncoder);
			return ;
		}
		if (0 == nRet)
			break;

		
		for (i = 0; i < pcEncoder->m_nNal; ++i)
		{
			if (-1 == pcEncoder->m_nFd)
			{
				printf("pcEncoder->m_nFd == -1\n");
				return;
			}
			write(pcEncoder->m_nFd, pcEncoder->m_psNal[i].p_payload, pcEncoder->m_psNal[i].i_payload);
		}
	}

}
Code Example #3
static bool obs_x264_encode(void *data, struct encoder_frame *frame,
		struct encoder_packet *packet, bool *received_packet)
{
	struct obs_x264 *obsx264 = data;
	x264_nal_t      *nals;
	int             nal_count;
	int             ret;
	x264_picture_t  pic, pic_out;

	if (!frame || !packet || !received_packet)
		return false;

	if (frame)
		init_pic_data(obsx264, &pic, frame);

	ret = x264_encoder_encode(obsx264->context, &nals, &nal_count,
			(frame ? &pic : NULL), &pic_out);
	if (ret < 0) {
		warn("encode failed");
		return false;
	}

	*received_packet = (nal_count != 0);
	parse_packet(obsx264, packet, nals, nal_count, &pic_out);

	return true;
}
Code Example #4
File: h264encoder.cpp Project: MoshDev/AndroidRTC
void H264Encoder::doEncoding() {
    static int count = 0;
    // encoding current picture 
    int nals;
    x264_nal_t *nal_pointer;
    
    /*
    if ( count % 100) {
        x264_picin_[ppIndex].i_type = X264_TYPE_IDR;
    } else {
        x264_picin_[ppIndex].i_type = X264_TYPE_AUTO;
    }
    */
    
    x264_encoder_encode(x264_hdl_, &nal_pointer, &nals, &x264_picin_[ppIndex], &x264_picout_);    
    
    // fetching the current data
    for ( int i = 0; i < nals; i++) {
        if( nal_pointer[i].i_type != 6) // skip SEI NALs (6 == NAL_SEI)
            SignalCodedNAL(this, &nal_pointer[i], 0);
    }  
    
    ppIndex = 1 - ppIndex;
    count ++;
}
Code Example #5
File: main.c Project: unixpickle/Processing-H264
int encode_context_frame(EncodeContext context, const char * frameData, char ** nalData, int * dataSize) {
    x264_picture_t pictureOut, pictureIn;
    x264_picture_alloc(&pictureIn, X264_CSP_I420, context.width, context.height);
    int rgbStride = context.width * 3;
    sws_scale(context.converter, (const uint8_t **)&frameData, &rgbStride, 0, context.height,
              pictureIn.img.plane, pictureIn.img.i_stride);
    
    x264_nal_t * nals;
    int i_nals;
    if (DEBUG_ENABLED) printf("encode_context_frame: passing data to x264\n");
    int frameSize = x264_encoder_encode(context.encoder, &nals, &i_nals, &pictureIn, &pictureOut); // TODO: figure out if picture_out can be NULL
    x264_picture_clean(&pictureIn);
    if (frameSize <= 0) return -1;
    
    if (DEBUG_ENABLED) printf("encode_context_frame: joining the frames\n");
    
    int totalSize = 0;
    for (int i = 0; i < i_nals; i++) {
        totalSize += nals[i].i_payload;
    }
    
    char * returnData = (char *)malloc(totalSize);
    int offset = 0;
    for (int i = 0; i < i_nals; i++) {
        memcpy(&returnData[offset], nals[i].p_payload, nals[i].i_payload);
        offset += nals[i].i_payload;
    }
    
    *nalData = returnData;
    *dataSize = totalSize;
    
    return 0;
}
Code Example #6
__declspec(dllexport) int __cdecl EncodeFrame(struct TranscoderContext* ctx, char* bgraInput, char* packetOutput) {
	//fprintf(stdout, "------------------------\n");
	clock_t begin = clock();
	//TheInit(width, height, fps);
	//x264_t* encoder = AllocEncoder(width, height, fps);
	//AVCodecContext* av_codec_context = AllocFfmpeg(width, height);

	x264_t* encoder = ctx->encoder;

	struct TranscoderOptions* options = ctx->options;
	int iWidth = options->InputWidth;
	int iHeight = options->InputHeight;
	int oWidth = options->OutputWidth;
	int oHeight = options->OutputHeight;

	int size = 0;
	int result = 0;

	// NOTE: convertCtx (SwsContext*) and pic_in (x264_picture_t) are assumed to be
	// globals initialized elsewhere (see the commented-out TheInit/AllocEncoder calls above).
	//struct SwsContext* convertCtx = sws_getContext(iWidth, iHeight, AV_PIX_FMT_RGBA, oWidth, oHeight, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
	if (!convertCtx) {
		return -1;
	}

	//data is a pointer to your RGBA structure
	int srcstride = iWidth * 4; //RGBA stride is just 4*width
	sws_scale(convertCtx, &bgraInput, &srcstride, 0, iHeight, pic_in.img.plane, pic_in.img.i_stride);
	//sws_freeContext(convertCtx);

	x264_nal_t* nals;
	int i_nals;
	x264_picture_t pic_out;
	int frame_size = x264_encoder_encode(encoder, &nals, &i_nals, &pic_in, &pic_out);

	int i;
	int apparentSize = 0;
	//char* NALBYTES = malloc(width*height * 4);
	if (frame_size >= 0)
	{
		int index = HEADER_SIZE;
		//index = 0;
		for (i = 0; i < i_nals; i++)
		{
			x264_nal_t nal = nals[i];
			//memcpy(&(NALBYTES[index]), nal.p_payload, nal.i_payload);
			memcpy(&(packetOutput[index]), nal.p_payload, nal.i_payload);
			index += nal.i_payload;
			apparentSize += nal.i_payload;
		}

		//fprintf(stdout, "SIZE: %d\n", apparentSize);
		int type = VIDEO_FRAME;
		memcpy(packetOutput, &type, sizeof(int));
		memcpy(&(packetOutput[sizeof(int)]), &apparentSize, sizeof(int));

		//memcpy(packetOutput, &apparentSize, sizeof(int));
	}

	int packetSize = apparentSize + HEADER_SIZE;
	return packetSize;
}
Code Example #7
File: x264.c Project: clzhan/x264-vs2008
static int  Encode_frame( x264_t *h, hnd_t hout, x264_picture_t *pic )
{
    x264_picture_t pic_out;
    x264_nal_t *nal;
    int i_nal, i;
    int i_file = 0;

    if( x264_encoder_encode( h, &nal, &i_nal, pic, &pic_out ) < 0 )
    {
        fprintf( stderr, "x264_encoder_encode failed\n" );
    }

    for( i = 0; i < i_nal; i++ )
    {
        int i_size;
        int i_data;

        i_data = DATA_MAX;
        if( ( i_size = x264_nal_encode( data, &i_data, 1, &nal[i] ) ) > 0 )
        {
            i_file += p_write_nalu( hout, data, i_size );
        }
        else if( i_size < 0 )
        {
            fprintf( stderr, "need to increase buffer size (size=%d)\n", -i_size );
        }
    }
    if (i_nal)
        p_set_eop( hout, &pic_out );

    return i_file;
}
Code Example #8
File: h264encoder.cpp Project: CeBkCn/android-eye
int H264Encoder::doEncode(const unsigned char* yuv, unsigned char* outBuffer, const int flag) {
    int width = x264_opt_.i_width;
    int height = x264_opt_.i_height;
    memcpy(x264_picin_.img.plane[0], yuv, width*height);
    memcpy(x264_picin_.img.plane[1], yuv + width*height, width*height/2);

    if ( flag == 1) {
        x264_picin_.i_type = X264_TYPE_IDR;
    } else {
        x264_picin_.i_type = X264_TYPE_P;
    }

    int nals;
    x264_nal_t *nal_pointer;
    int ret = x264_encoder_encode(x264_hdl_, &nal_pointer, &nals, &x264_picin_, &x264_picout_);
    if ( ret <= 0) {
        return ret;
    }

    int outLength = 0;
    for ( int i = 0; i < nals; i++) {
        if( nal_pointer[i].i_type != 6) { // skip SEI NALs (6 == NAL_SEI)
            x264_nal_t* nal = &nal_pointer[i];
            memcpy(&outBuffer[outLength], nal->p_payload, nal->i_payload);
            outLength += nal->i_payload;
        }
   }

   return outLength;
}
Code Example #9
File: msx264.cpp Project: saoziyang/saozi.yang-study
//x264_t* msx264::msx264_encoder(void* data)
int msx264::msx264_encoder(void* data)
{
    int ret = 0;

	memcpy(pPicIn->img.plane[0], data, 640*480);
	memcpy(pPicIn->img.plane[1], (unsigned char*)data + 640*480, 640*480 / 4);
	memcpy(pPicIn->img.plane[2], (unsigned char*)data + 640*480 + 640*480/4, 640*480 / 4);

	//encode(pX264Handle, pPicIn, pPicOut);
    ret = x264_encoder_encode(pX264Handle, &pNals, &iNal, pPicIn, pPicOut);
    if (ret == 0) {
        printf("succes\n");
    } else if (ret < 0) {
        printf("encode error\n");
    } else if (ret > 0) {
        printf("get encode data\n");
    }

    for (int i = 0; i < iNal; ++i) {
		write(file, pNals[i].p_payload, pNals[i].i_payload);
	}

	int iFrames = x264_encoder_delayed_frames(pX264Handle);

    return iFrames;
}
Code Example #10
File: LiveParser.cpp Project: dulton/jorhy-prj
int CLiveParser::encode(x264_t* pX264Handle, x264_picture_t* pPicIn, x264_picture_t* pPicOut)
{
	 int iResult   = 0;
	 iResult = x264_encoder_encode(pX264Handle, &m_pNals, &m_iNal, pPicIn, pPicOut);
	 if (0 == iResult)
	 {
	  //J_OS::LOGINFO("编码成功,但被缓存了");
	 }
	 else if(iResult < 0)
	 {
		 J_OS::LOGINFO("编码出错");
	 }
	 else if (iResult > 0)
	 {
		 //J_OS::LOGINFO("得到编码数据");
	 }

	 /* {{ Purpose unclear
	 unsigned char* pNal = NULL;
	 for (int i = 0;i < iNal; ++i)
	 {
	 int iData = 1024 * 32;
	 x264_nal_encode(pX264Handle, pNal,&pNals[i]);
	 }
	 * }} */

	 //* Get the number of frames buffered inside x264.
	 int iFrames = x264_encoder_delayed_frames(pX264Handle);
	 //J_OS::LOGINFO("Frames currently buffered in the encoder: %d", iFrames);
	 return iFrames;
}
Code Example #11
File: x264lib.c Project: svn2github/Xpra
int compress_image(struct x264lib_ctx *ctx, x264_picture_t *pic_in, uint8_t **out, int *outsz)
{
	if (!ctx->encoder || !ctx->rgb2yuv) {
		free_csc_image(pic_in);
		*out = NULL;
		*outsz = 0;
		return 1;
	}
	x264_picture_t pic_out;

	/* Encoding */
	pic_in->i_pts = 1;

	x264_nal_t* nals;
	int i_nals;
	int frame_size = x264_encoder_encode(ctx->encoder, &nals, &i_nals, pic_in, &pic_out);
	if (frame_size < 0) {
		fprintf(stderr, "Problem during x264_encoder_encode: frame_size is invalid!\n");
		free_csc_image(pic_in);
		*out = NULL;
		*outsz = 0;
		return 2;
	}
	/* Do not clean that! */
	*out = nals[0].p_payload;
	*outsz = frame_size;
	free_csc_image(pic_in);
	return 0;
}
Code Example #12
File: x264lib.c Project: svn2github/Xpra
int compress_image(struct x264lib_ctx *ctx, const uint8_t *in, int stride, uint8_t **out, int *outsz)
{
	if (!ctx->encoder || !ctx->rgb2yuv)
		return 1;

	x264_picture_t pic_in, pic_out;
	x264_picture_alloc(&pic_in, X264_CSP_I420, ctx->width, ctx->height);

	/* Colorspace conversion (RGB -> I420) */
	sws_scale(ctx->rgb2yuv, &in, &stride, 0, ctx->height, pic_in.img.plane, pic_in.img.i_stride);

	/* Encoding */
	pic_in.i_pts = 1;

	x264_nal_t* nals;
	int i_nals;
	int frame_size = x264_encoder_encode(ctx->encoder, &nals, &i_nals, &pic_in, &pic_out);
	if (frame_size >= 0) {
		/* Do not free that! */
		*out = nals[0].p_payload;
		*outsz = frame_size;
	} else {
		fprintf(stderr, "Problem\n");
		x264_picture_clean(&pic_in);
		return 2;
	}
  
	x264_picture_clean(&pic_in);
	return 0;
}
Code Example #13
File: myx264.cpp Project: my12doom/personalProjects
int x264::encode_a_frame(void *data, void*nal_out, bool *IDR)
{
	if (!encoder)
		return E_INVALIDARG;

	pic_in.img.plane[0] = (uint8_t*)data;
	pic_in.img.plane[1] = pic_in.img.plane[0] + width * height;
	pic_in.img.plane[2] = pic_in.img.plane[1] + width * height / 4;


	x264_nal_t *nals;
	int nnal;
	int frame_size = 0;

	pic_in.i_pts = i_pts++;
	pic_in.i_type = IDR && *IDR ? X264_TYPE_IDR : X264_TYPE_AUTO;
	x264_encoder_encode(encoder, &nals, &nnal, &pic_in, &pic_out);

	BYTE *p = ((BYTE*)nal_out) + 4;
	for (x264_nal_t *nal = nals; nal < nals + nnal; nal++) {
		memcpy(p, nal->p_payload, nal->i_payload);
		p += nal->i_payload;
		frame_size += nal->i_payload;

		if (nal->i_type == 5 && IDR) // 5 == NAL_SLICE_IDR
			*IDR = true;
	}
	last_encode_time = timeGetTime();
	return frame_size;
}
Code Example #14
int X264Encoder::Encode(unsigned char* szYUVFrame, unsigned char* outBuf, int& outLen, bool& isKeyframe)
{
    // Could be optimized: keep a pointer in m_pic and point it directly at szYUVFrame
    memcpy(m_pic.img.plane[0], szYUVFrame, m_param.i_width * m_param.i_height*3 / 2);
    //m_pic.img.plane[0] = szYUVFrame;

    m_param.i_frame_total++;
    m_pic.i_pts = (int64_t)m_param.i_frame_total * m_param.i_fps_den;
    if (isKeyframe)
        m_pic.i_type = X264_TYPE_IDR;
    else
        m_pic.i_type = X264_TYPE_AUTO;

    x264_picture_t pic_out;
    x264_nal_t *nal=0;
    int i_nal, i; // number of NALs

    if( x264_encoder_encode( m_h, &nal, &i_nal, &m_pic, &pic_out ) < 0 )
    {
        //fprintf( stderr, "x264 [error]: x264_encoder_encode failed\n" );
        return -1;
    }

    char* tmpbuf = NULL;
    char* ptmpbuf = NULL;
    if (pps_ == NULL)
    {
        tmpbuf = (char*)malloc(m_param.i_width * m_param.i_height * 100);
        ptmpbuf = tmpbuf;
    }

    int maxlen = outLen;
    outLen = 0;
    for( i = 0; i < i_nal; i++ )
    {
        int i_size = 0;
        x264_nal_encode(outBuf+outLen, &i_size, 1, &nal[i] );

        if (ptmpbuf)
        {
            memcpy(ptmpbuf, outBuf+outLen, i_size);
            ptmpbuf += i_size;
        }

        // Replace the 0x00000001 start code with the NALU size
        UI32ToBytes((char*)(outBuf+outLen), i_size-4);
        outLen += i_size;
    }

    if (tmpbuf)
    {
        FindSpsAndPPsFromBuf(tmpbuf, ptmpbuf-tmpbuf);
        free(tmpbuf);
    }

    isKeyframe = (pic_out.i_type == X264_TYPE_IDR);

    return 0;
}
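The UI32ToBytes helper used in the example above is not shown; judging from the call site, it presumably overwrites the 4-byte 0x00000001 start code slot with a big-endian NALU length (AVCC-style framing). A minimal sketch under that assumption (hypothetical, not taken from the original project):

// Hypothetical reconstruction of UI32ToBytes as called above: write 'val' into
// 'buf' as a 32-bit big-endian integer, so the Annex-B start code slot becomes
// a length prefix. Assumption only; the real helper is not part of this example.
static void UI32ToBytes(char* buf, unsigned int val)
{
    buf[0] = (char)((val >> 24) & 0xFF);
    buf[1] = (char)((val >> 16) & 0xFF);
    buf[2] = (char)((val >>  8) & 0xFF);
    buf[3] = (char)( val        & 0xFF);
}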
Code Example #15
File: video_encoder.c Project: repco/arcade_server
uint8_t* video_encoder_encode_frame(VideoEncoder *enc,int *bytesRead, uint8_t *pixels){
    int bytesFilled = avpicture_fill(&enc->pic_raw,pixels,enc->in_fmt,enc->width,enc->height);
    if(bytesFilled <= 0){
	fprintf(stderr,"Could not fill raw picture bytes\n");
	return NULL;
    }
    sws_scale(enc->sws,
	      (const uint8_t* const*) (enc->pic_raw.data),
	      enc->pic_raw.linesize,
              0,
              enc->height,
              enc->pic_in.img.plane,
              enc->pic_in.img.i_stride
              );

    enc->pic_in.i_pts = enc->num_frames;

    x264_nal_t *nals;
    int num_nals;

    int out_bytes = x264_encoder_encode(
                                       enc->encoder,
                                       &nals,
                                       &num_nals,
                                       &enc->pic_in,
                                       &enc->pic_out
                                       );



    if (out_bytes > enc->output_buffer_size){
        //TODO: dynamically allocate buffer (probably not, this is RT!)
        fprintf(stderr,
		"buffer overflow! x264_encoder_encode_frame returns %d total bytes, buffer is %d bytes\n",
		out_bytes,
		enc->output_buffer_size);
        return NULL;
    }

    size_t offset = 0;

    int i;
    for(i = 0; i < num_nals; i++){
        x264_nal_t nal = nals[i];
        memcpy(
               &(enc->output_buffer[offset]),
               nal.p_payload,
               nal.i_payload
               );

        offset += nal.i_payload;
    }

    enc->num_frames += 1;
    *bytesRead = offset;
    return enc->output_buffer;
}
Code Example #16
static void enc_process(MSFilter *f){
	EncData *d=(EncData*)f->data;
	uint32_t ts=f->ticker->time*90LL;
	mblk_t *im;
	MSPicture pic;
	MSQueue nalus;
	ms_queue_init(&nalus);
	while((im=ms_queue_get(f->inputs[0]))!=NULL){
		if (ms_yuv_buf_init_from_mblk(&pic,im)==0){
			x264_picture_t xpic;
			x264_picture_t oxpic;
			x264_nal_t *xnals=NULL;
			int num_nals=0;

			memset(&xpic, 0, sizeof(xpic));
			memset(&oxpic, 0, sizeof(oxpic));

			/*send I frame 2 seconds and 4 seconds after the beginning */
			if (video_starter_need_i_frame(&d->starter,f->ticker->time))
				d->generate_keyframe=TRUE;

			if (d->generate_keyframe){
				xpic.i_type=X264_TYPE_IDR;
				d->generate_keyframe=FALSE;
			}else xpic.i_type=X264_TYPE_AUTO;
			xpic.i_qpplus1=0;
			xpic.i_pts=d->framenum;
			xpic.param=NULL;
			xpic.img.i_csp=X264_CSP_I420;
			xpic.img.i_plane=3;
			xpic.img.i_stride[0]=pic.strides[0];
			xpic.img.i_stride[1]=pic.strides[1];
			xpic.img.i_stride[2]=pic.strides[2];
			xpic.img.i_stride[3]=0;
			xpic.img.plane[0]=pic.planes[0];
			xpic.img.plane[1]=pic.planes[1];
			xpic.img.plane[2]=pic.planes[2];
			xpic.img.plane[3]=0;
            
			if (x264_encoder_encode(d->enc,&xnals,&num_nals,&xpic,&oxpic)>=0){
				x264_nals_to_msgb(xnals,num_nals,&nalus);
				/*if (num_nals == 0)	ms_message("Delayed frames info: current=%d max=%d\n", 
					x264_encoder_delayed_frames(d->enc),
					x264_encoder_maximum_delayed_frames(d->enc));
				*/
				rfc3984_pack(d->packer,&nalus,f->outputs[0],ts);
				d->framenum++;
				if (d->framenum==0)
					video_starter_first_frame(&d->starter,f->ticker->time);
			}else{
				ms_error("x264_encoder_encode() error.");
			}
		}
		freemsg(im);
	}
}
Code Example #17
    bool Encode(LPVOID picInPtr, LPVOID nalOut, int *pNalNum)
    {
        x264_picture_t *picIn = (x264_picture_t*)picInPtr;
        x264_picture_t picOut;
		x264_picture_init(&picOut);

        if(x264_encoder_encode(x264, (x264_nal_t**)&nalOut, pNalNum, picIn, &picOut) < 0)
        {
            DOLOG("x264 encode failed");
            return false;
        }
		int countCacheFrame = x264_encoder_delayed_frames(x264);
		DOLOG("当前被缓存的帧数为" + countCacheFrame);
		int res = 0;
		
		if (countCacheFrame>0)
		{
			res = x264_encoder_encode(x264, (x264_nal_t**)&nalOut, pNalNum, NULL, &picOut);
		}

        return true;
    }
Code Example #18
File: export.cpp Project: ohgodhowdidthis/trance
bool H264Exporter::add_frame(x264_picture_t* pic)
{
  x264_nal_t* nal;
  int nal_size;
  int frame_size = x264_encoder_encode(_encoder, &nal, &nal_size, pic, &_pic_out);
  if (frame_size < 0) {
    std::cerr << "couldn't encode frame" << std::endl;
    return false;
  }
  if (frame_size) {
    _file.write((const char*) nal->p_payload, frame_size);
  }
  return true;
}
Code Example #19
//FILE* ff1 ;
int H264EncWrapper::Encode(unsigned char* szYUVFrame, TNAL*& pNALArray, int& iNalNum)
{
    // Could be optimized: keep a pointer in m_pic and point it directly at szYUVFrame
    memcpy(m_pic.img.plane[0], szYUVFrame, m_param.i_width * m_param.i_height*3 / 2);
    
    m_pic.i_pts = (int64_t)m_iFrameNum * m_param.i_fps_den;

    x264_picture_t pic_out;
    x264_nal_t *nal;
    int i_nal, i; // number of NALs

    if( x264_encoder_encode( m_h, &nal, &i_nal, &m_pic, &pic_out ) < 0 )
    {
        fprintf( stderr, "x264 [error]: x264_encoder_encode failed\n" );
        return -1;
    }

    int i_size = 0;
    pNALArray = new TNAL[i_nal];
    memset(pNALArray, 0, sizeof(TNAL) * i_nal);
    
    for( i = 0; i < i_nal; i++ )
    {
        if( m_iBufferSize < nal[i].i_payload * 3/2 + 4 )
        {
            m_iBufferSize = nal[i].i_payload * 2 + 4;
            my_free( m_pBuffer );
            m_pBuffer = (uint8_t*)my_malloc( m_iBufferSize );
        }

        i_size = m_iBufferSize;
        x264_nal_encode( m_pBuffer, &i_size, 1, &nal[i] );
        //DEBUG_LOG(INF, "Encode frame[%d], NAL[%d],  length = %d, ref_idc = %d, type = %d", 
        //    m_iFrameNum, i, i_size, nal[i].i_ref_idc, nal[i].i_type);
        //printf("Encode frame[%d], NAL[%d],  length = %d, ref_idc = %d, type = %d\n", 
        //    m_iFrameNum, i, i_size, nal[i].i_ref_idc, nal[i].i_type);
        
        //fwrite(m_pBuffer, 1, i_size, ff1);
        
        // Strip the leading 00 00 00 01 start code from the buffer to get the actual NAL unit
        pNALArray[i].size = i_size;
        pNALArray[i].data = new unsigned char[i_size];
        memcpy(pNALArray[i].data, m_pBuffer, i_size);
        
    }

    iNalNum = i_nal;    
    m_iFrameNum++;
    return 0;
}
Code Example #20
void mpeg_video_recorder::process_frame(const void* data, uints size, uint64 timestamp_ns, uint nbatchframes, bool video_end){

	if (!m_IsRecording){
		// this shouldn't be here, but the last frame isn't unique, so we must skip frames sent after the "last" frame
		return;
	}

	if (m_iLastFramesize > 0 && m_uiFrameIndex > 0){
		coid::uint64 timeMs = (timestamp_ns - m_uiLastTimestampNs) / 1000000.0;
		MP4Duration frameDuration = timeMs * 90; // 90 000 ticks per seconds in mp4 container
		MP4WriteSample(m_hMp4FileHandle, m_iVideoTrackID, m_pLastNals[0].p_payload, m_iLastFramesize, frameDuration);
		m_uiLastTimestampNs = timestamp_ns;
	}
	else if(m_uiFrameIndex == 0){
		m_uiLastTimestampNs = timestamp_ns;
	}
	else{
		log(WARNINGMESSAGE("Frameskip detected!"));
	}

	uchar * ucharData = (uchar*)data;

	uint width4 = GetDivisibleBy4(m_iWidth);
	uint halfWidth4 = GetDivisibleBy4(m_iWidth >> 1);

	x264_picture_t pic_in,pic_out;
	x264_picture_init(&pic_in);
	pic_in.img.i_csp = X264_CSP_I420;
	pic_in.img.i_plane = 3;
	pic_in.img.plane[0] = ucharData;
	pic_in.img.plane[1] = ucharData + width4;
	pic_in.img.plane[2] = ucharData + 2 * width4 + halfWidth4;
	pic_in.img.i_stride[0] = width4 + halfWidth4;
	pic_in.img.i_stride[2] = pic_in.img.i_stride[1] = (width4 + halfWidth4) * 2;
	
	int i_nals;
	
	m_iLastFramesize = x264_encoder_encode(m_pEncoder, &m_pLastNals, &i_nals, &pic_in, &pic_out);

	m_uiFrameIndex++;

	if (video_end){
		if (m_iLastFramesize > 0){
			MP4Duration frameDuration = 33 * 90;  // 90 000 ticks per seconds in mp4 container
			MP4WriteSample(m_hMp4FileHandle, m_iVideoTrackID, m_pLastNals[0].p_payload, m_iLastFramesize, frameDuration);
		}
		StopRecording();
	}	
}
Code Example #21
File: x264Encoder.cpp Project: yujun1703/264
HRESULT CX264Encoder::Encode(VQQUCHAR *pIn, VQQLONG nInLen, VQQUCHAR **ppOut, VQQLONG *pOutLen, EmFrameType FrameType)
{
    x264_picture_t pic_out;
    int i = 0, i_nal = 0, nals_size = 0;
    x264_nal_t *nal;
    int i_frame_size = 0;

    if ( !pIn || nInLen == 0 || !ppOut || !pOutLen )
    {
        return E_INVALIDARG;
    }

    int nPixelSize = m_stEncParam.iWidth * m_stEncParam.iHeight;
    int nFrameSize = nPixelSize * 3 / 2;
    if ( nInLen < nFrameSize )
    {
        return E_INVALIDARG;
    }    

    ((x264_picture_t*)m_pPic)->img.plane[0] = pIn;
    ((x264_picture_t*)m_pPic)->img.plane[1] = pIn + nPixelSize;
    ((x264_picture_t*)m_pPic)->img.plane[2] = pIn + nPixelSize * 5 /4;

    if ( 0 != SetFrameRefInfo( (x264_picture_t*)m_pPic, FrameType ) )
    {
        LOG(stderr, "x264 [error]: Frame Type unsupport\n");
        return false;
    }
    
    if ( (i_frame_size = x264_encoder_encode( (x264_t*)m_px264Handle, &nal, &i_nal, (x264_picture_t*)m_pPic, &pic_out )) < 0 )
    {
        LOG(stderr, "x264 [error]: x264_encoder_encode failed\n");
        return E_FAIL;
    }

    if ( i_frame_size > 0 )
    {
        *ppOut = nal[0].p_payload;
         UpdateRefStatus( FrameType );
    }
    else if(!i_frame_size)
        printf("drop 1 frame\n");


    *pOutLen = i_frame_size;
    return S_OK;
}
Code Example #22
File: H264Encoder.cpp Project: Kristishka/libvision
H264EncoderResult H264Encoder::Encode(vision::Image<uint32_t, vision::RGB> & img, uint64_t pts)
{
    x264_nal_t* nals;
    int i_nals;
    H264EncoderResult res;

    /* Convert from RGBA to YUV420P */
    uint8_t *buf_in[4]={(uint8_t*)img.raw_data,NULL,NULL,NULL};
    sws_scale(m_convert_ctx, (const uint8_t* const*)buf_in, &m_stride, 0, m_height, m_pic_in.img.plane, m_pic_in.img.i_stride);
    m_pic_in.i_pts = pts;

    /* Encode */
    while( (res.frame_size = x264_encoder_encode(m_encoder, &nals, &i_nals, &m_pic_in, &m_pic_out)) == 0 ) { m_pic_in.i_pts++; }
    res.frame_data = nals[0].p_payload;

    return res;
}
Code Example #23
File: H264Encoder.cpp Project: 9crk/EasyClient
int CH264Encoder::Clean()
{
	if (m_hx264!=NULL)
	{
		m_bIsworking=false;
		int m_inal=0;		
		x264_encoder_encode(m_hx264,&m_x264_nal,&m_inal,NULL,&m_x264_picout);
		x264_encoder_close(m_hx264);
		m_hx264=NULL;
		if (m_x264_picin!=NULL)
		{
			free(m_x264_picin);
			m_x264_picin=NULL;
		}		
	}
	return 0;
}
Code Example #24
File: x264_code.c Project: babatengwo/v4l2_view
//compress a frame and deal with the result with the nal_handle function
int	compress_frame(int type, uint8 *in, nal_fun_handle nal_handle)
{
	struct encoder *en = h264_encoder;
	x264_picture_t	pic_out;
	int 	nNal = -1;

	//picture format conversion and save it in the picture planar field
	yuyv_to_i420p_format(in, en->picture);

	switch (type)
	{
	case 0:
		en->picture->i_type = X264_TYPE_P;
		break;

	case 1:
		en->picture->i_type = X264_TYPE_IDR;
		break;

	case 2:
		en->picture->i_type = X264_TYPE_I;
		break;

	default:
		en->picture->i_type = X264_TYPE_AUTO;
		break;
	}

	//encode a picture
	if (x264_encoder_encode(en->handle, &(en->nal),
			&nNal, en->picture, &pic_out) < 0)
	{
		printf("x264_encoder_encode failure\n");

		return BASICERROR;
	}

	//update pts
	en->picture->i_pts++;

    //deal with the compress result
	nal_handle( en->nal, nNal);

	return SUCCESS;
}
Code Example #25
File: x264Encoder.cpp Project: kwende/FFmpegTest1
//void x264Encoder::EncodeFrame(const cv::Mat& image)
void x264Encoder::EncodeFrame(cv::Mat& image)
{
    int srcStride = 512 / 2 * 3;

    sws_scale(convertContext, &(image.data), &srcStride, 0, 424 / 2, picture_out.img.plane, picture_out.img.i_stride);
    x264_nal_t* nals;
    int i_nals = 0;
    int frameSize = -1;

    frameSize = x264_encoder_encode(encoder, &nals, &i_nals, &picture_out, &picture_out2);
    if (frameSize > 0)
    {
        for (int i = 0; i< i_nals; i++)
        {
            outputQueue.push(nals[i]);
        }
    }
}
Code Example #26
File: encoder.cpp Project: SummerLv/paperChapter4
void encode_frame(void)
{
	x264_nal_t *nals,*nal;
	int nnal;

	while(read(inf,yuv_buffer,yuv_size)>0)
	{
		pic_in.i_pts=i_pts++;
		x264_encoder_encode(encoder,&nals,&nnal,&pic_in,&pic_out);
		
		for(nal=nals;nal<nals+nnal;nal++)
		{
			write(outf,nal->p_payload,nal->i_payload);
		}
	}
	i_pts=0;

}
Code Example #27
long X264Encoder::x264EncoderProcess(x264_picture_t *pPicture, x264_nal_t **nals, int& nalsCount)
{
    pPicture->i_pts = (int64_t)(frameNo * pParameter->i_fps_den);
    pPicture->i_type = X264_TYPE_AUTO;
    pPicture->i_qpplus1 = 0;//X264_QP_AUTO;

    if (isForceIDRFrameEnabled) {
        pPicture->i_type = X264_TYPE_IDR;
        isForceIDRFrameEnabled = false;
    }

    int32_t framesize = -1;

    framesize = x264_encoder_encode(x264EncoderHandle, nals, &nalsCount, pPicture, pOutput);

    if (framesize>0) {
        frameNo++;
    }

    return framesize;
}
Code Example #28
File: H264Encoder.cpp Project: 9crk/EasyClient
unsigned char* CH264Encoder::Encoder(unsigned char *indata, int inlen, int &outlen, bool &bIsKeyFrame)
{
	if (m_bIsworking&&(inlen==m_ncheckyuvsize))
	{	
		int m_inal=0;
		memcpy(m_x264_picin->img.plane[0],indata,inlen);
		outlen=x264_encoder_encode(m_hx264,&m_x264_nal,&m_inal,m_x264_picin,&m_x264_picout);
		m_x264_picin->i_pts++;
	}else
	{
		outlen=-1;
	}
	if (outlen>0)
	{
		bIsKeyFrame=m_x264_picout.b_keyframe==1;
		return m_x264_nal[0].p_payload;
	}else
	{
		return NULL;
	}
}
Code Example #29
File: x264lib.c Project: svn2github/Xpra
int compress_image(struct x264lib_ctx *ctx, x264_picture_t *pic_in, uint8_t **out, int *outsz, int quality_override)
{
	if (!ctx->encoder || !ctx->rgb2yuv) {
		free_csc_image(pic_in);
		*out = NULL;
		*outsz = 0;
		return 1;
	}
	x264_picture_t pic_out;

	/* Encoding */
	pic_in->i_pts = 1;
	if (quality_override>=0) {
		// Retrieve current parameters and override quality for this frame
		float new_q = get_x264_quality(quality_override);
		if (new_q!=ctx->x264_quality) {
			x264_param_t *param = malloc(sizeof(x264_param_t));
			x264_encoder_parameters(ctx->encoder, param);
			param->rc.f_rf_constant = new_q;
			pic_in->param = param;
			pic_in->param->param_free = free;
		}
	}

	x264_nal_t* nals;
	int i_nals;
	int frame_size = x264_encoder_encode(ctx->encoder, &nals, &i_nals, pic_in, &pic_out);
	if (frame_size < 0) {
		fprintf(stderr, "Problem during x264_encoder_encode: frame_size is invalid!\n");
		free_csc_image(pic_in);
		*out = NULL;
		*outsz = 0;
		return 2;
	}
	free_csc_image(pic_in);
	/* Do not clean that! */
	*out = nals[0].p_payload;
	*outsz = frame_size;
	return 0;
}
Code Example #30
static int
x264_encode_frame (x264_t *h, void *handle, x264_picture_t *pic)
{
  x264_picture_t pic_out;
  x264_nal_t    *nal;
  int            i_nal;
  int            i;
  int            i_file = 0;

  /* Do not force any parameters */
  if (pic)
    {
      pic->i_type = X264_TYPE_AUTO;
      pic->i_qpplus1 = 0;
    }
  if (x264_encoder_encode (h, &nal, &i_nal, pic, &pic_out) < 0)
    {
      fprintf (stderr, _("x264_encoder_encode failed\n"));
    }

  for (i = 0; i < i_nal; i++)
    {
      int i_size;
      int i_data;

      i_data = DATA_MAX;
      if ((i_size = x264_nal_encode (data, &i_data, 1, &nal[i])) > 0 )
        {
          i_file += p_write_nalu (handle, data, i_size);
        }
      else if (i_size < 0)
        {
          fprintf (stderr, _("need to increase buffer size (size=%d)\n"), -i_size);
        }
    }
  if (i_nal)
    p_set_eop (handle, &pic_out);

  return i_file;
}
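Several of the examples above stop encoding without draining the frames x264 may still be buffering. For reference, below is a minimal, self-contained sketch of the canonical encode-and-flush loop around x264_encoder_encode. It is illustrative only: the file names, the 640x480 resolution, the frame rate, and the preset are assumptions, not taken from any example above.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <x264.h>

int main(void)
{
    const int width = 640, height = 480;                        /* assumed dimensions */
    const size_t frame_bytes = (size_t)width * height * 3 / 2;  /* one I420 frame */

    x264_param_t param;
    if (x264_param_default_preset(&param, "veryfast", NULL) < 0)
        return 1;
    param.i_csp    = X264_CSP_I420;
    param.i_width  = width;
    param.i_height = height;
    param.i_fps_num = 25;
    param.i_fps_den = 1;

    x264_t *enc = x264_encoder_open(&param);
    if (!enc)
        return 1;

    x264_picture_t pic_in, pic_out;
    if (x264_picture_alloc(&pic_in, param.i_csp, width, height) < 0)
        return 1;

    FILE *fin  = fopen("in.yuv", "rb");                         /* placeholder file names */
    FILE *fout = fopen("out.264", "wb");
    uint8_t *frame = malloc(frame_bytes);
    if (!fin || !fout || !frame)
        return 1;

    x264_nal_t *nals;
    int i_nal;
    int64_t pts = 0;

    /* Encode each raw frame; a positive return value is the total size in bytes of
     * the NAL units, which x264 lays out contiguously starting at nals[0].p_payload. */
    while (fread(frame, 1, frame_bytes, fin) == frame_bytes) {
        memcpy(pic_in.img.plane[0], frame, (size_t)width * height);
        memcpy(pic_in.img.plane[1], frame + width * height, (size_t)width * height / 4);
        memcpy(pic_in.img.plane[2], frame + width * height * 5 / 4, (size_t)width * height / 4);
        pic_in.i_pts = pts++;

        int frame_size = x264_encoder_encode(enc, &nals, &i_nal, &pic_in, &pic_out);
        if (frame_size < 0)
            break;
        if (frame_size > 0)
            fwrite(nals[0].p_payload, 1, frame_size, fout);
    }

    /* Drain frames still buffered inside the encoder (lookahead / B-frames). */
    while (x264_encoder_delayed_frames(enc) > 0) {
        int frame_size = x264_encoder_encode(enc, &nals, &i_nal, NULL, &pic_out);
        if (frame_size < 0)
            break;
        if (frame_size > 0)
            fwrite(nals[0].p_payload, 1, frame_size, fout);
    }

    free(frame);
    x264_picture_clean(&pic_in);
    x264_encoder_close(enc);
    fclose(fin);
    fclose(fout);
    return 0;
}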