Example #1
int FFmpegEncoder::convertPixFmt(const uint8_t *src, int srclen, int srcw, int srch, PixelFormat srcfmt, 
								 uint8_t *dst, int dstlen, int dstw, int dsth, PixelFormat dstfmt)
{
	LOGI("[FFmpegEncoder::%s] begin", __FUNCTION__);
	if (!src || !dst) {
		LOGE("[FFmpegEncoder::%s] src or dst is NULL", __FUNCTION__);
		return -1;
	}

	// src input frame
	AVPicture srcPic;
	FFmpegVideoParam srcParam(srcw, srch, srcfmt, 0, 0, "");
	if(avpicture_fill(&srcPic, (uint8_t *)src, srcParam.pixelFormat, srcParam.width, srcParam.height) == -1) {
		LOGE("[FFmpegEncoder::%s] fail to avpicture_fill for src picture", __FUNCTION__);
		return -1;
	}

	// dst output frame
	AVPicture dstPic;
	FFmpegVideoParam dstParam(dstw, dsth, dstfmt, 0, 0, "");
	if(avpicture_alloc(&dstPic, dstParam.pixelFormat, dstParam.width, dstParam.height) == -1) {
		LOGE("[FFmpegEncoder::%s] fail to avpicture_alloc for dst picture", __FUNCTION__);
		return -1;
	}

	int ret = -1;
	if (convertPixFmt(&srcPic, &dstPic, &srcParam, &dstParam) < 0) {
		LOGE("[FFmpegEncoder::%s] fail to convertPixFmt", __FUNCTION__);
	} else {
		ret = avpicture_layout(&dstPic, dstParam.pixelFormat, dstParam.width, dstParam.height, dst, dstlen);
	}
	avpicture_free(&dstPic);
	
	return ret;
}
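The avpicture_* trio used above (avpicture_fill, avpicture_alloc, avpicture_layout) was deprecated and eventually removed from FFmpeg. For readers on current releases, here is a minimal sketch of the same fill/alloc/serialize steps on the libavutil av_image_* API; the names mirror the example's parameters (with enum AVPixelFormat in place of the old PixelFormat), and error checking is omitted for brevity:

#include <libavutil/imgutils.h>

// Wrap the caller-supplied packed source buffer as plane pointers + strides
// (the replacement for avpicture_fill()).
uint8_t *src_data[4];
int      src_linesize[4];
av_image_fill_arrays(src_data, src_linesize, src, srcfmt, srcw, srch, 1);

// Allocate the destination image (the replacement for avpicture_alloc()).
uint8_t *dst_data[4];
int      dst_linesize[4];
av_image_alloc(dst_data, dst_linesize, dstw, dsth, dstfmt, 1);

// ... pixel-format conversion with sws_scale() goes here ...

// Serialize the planar image into the caller's flat buffer
// (the replacement for avpicture_layout()).
av_image_copy_to_buffer(dst, dstlen,
                        (const uint8_t * const *)dst_data, dst_linesize,
                        dstfmt, dstw, dsth, 1);

av_freep(&dst_data[0]);  // av_image_alloc() makes a single allocation
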
Example #2
int read_frame(AVFormatContext *pFormatCtx, int videostream, AVCodecContext *pCodecCtx, uint8_t *buff)
{
    AVPacket packet;
    AVFrame* pFrame;
    int frameFinished, ret;

    pFrame = avcodec_alloc_frame();
    ret = av_read_frame(pFormatCtx, &packet);
    if (ret < 0) {
        // No packet was read; don't touch an uninitialized AVPacket.
        av_free(pFrame);
        return ret;
    }

    if (packet.stream_index == videostream) {
        // Decode video frame
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        // Did we get a video frame?
        if (frameFinished) {
            avpicture_layout((AVPicture *)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, buff,
                    avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height));
        }
    }

    // Free the packet that was allocated by av_read_frame -- unconditionally,
    // not only when a frame was finished, or every non-video packet leaks.
    av_free_packet(&packet);
    av_free(pFrame);

    return ret;
}
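avcodec_decode_video2, used throughout these examples, was deprecated in FFmpeg 3.1 and removed in 5.0. A hedged sketch of the equivalent decode step on the send/receive API, reusing the example's pCodecCtx and pFrame names:

// One packet in, zero or more frames out.
static int decode_packet(AVCodecContext *pCodecCtx, AVFrame *pFrame, const AVPacket *packet)
{
    int ret = avcodec_send_packet(pCodecCtx, packet);
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_frame(pCodecCtx, pFrame)) >= 0) {
        /* consume pFrame here, e.g. with av_image_copy_to_buffer() */
        av_frame_unref(pFrame);
    }
    // EAGAIN ("send more input") and EOF are expected, not errors.
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
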
Example #3
/*
 * decode h264 frame
 * args:
 *    out_buf - pointer to decoded data
 *    in_buf - pointer to h264 data
 *    size - in_buf size
 *
 * asserts:
 *    h264_ctx is not null
 *    in_buf is not null
 *    out_buf is not null
 *
 * returns: decoded data size
 */
int h264_decode(uint8_t *out_buf, uint8_t *in_buf, int size)
{
	/*asserts*/
	assert(h264_ctx != NULL);
	assert(in_buf != NULL);
	assert(out_buf != NULL);

	AVPacket avpkt;

	av_init_packet(&avpkt);
	
	avpkt.size = size;
	avpkt.data = in_buf;

	int got_picture = 0;
	int len = avcodec_decode_video2(h264_ctx->context, h264_ctx->picture, &got_picture, &avpkt);

	if(len < 0)
	{
		fprintf(stderr, "V4L2_CORE: (H264 decoder) error while decoding frame\n");
		return len;
	}

	if(got_picture)
	{
		avpicture_layout((AVPicture *) h264_ctx->picture, h264_ctx->context->pix_fmt, 
			h264_ctx->width, h264_ctx->height, out_buf, h264_ctx->pic_size);
		return len;
	}
	else
		return 0;

}
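h264_ctx->pic_size passed to avpicture_layout above must equal the packed size of one frame; presumably it was precomputed with avpicture_get_size. On current FFmpeg the same value comes from av_image_get_buffer_size with alignment 1 (tightly packed):

#include <libavutil/imgutils.h>

int pic_size = av_image_get_buffer_size(h264_ctx->context->pix_fmt,
                                        h264_ctx->width, h264_ctx->height, 1);
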
Example #4
/*
Decode a video buffer.
Returns:
	0 : buffer decoded, but no image produced (incomplete).
	> 0 : decoded n images.
	-1 : error while decoding.
*/
int video_decode_packet(uint8_t* buffer, int buf_size) {
	//number of bytes processed by the frame parser and the decoder
	int parsedLen = 0, decodedLen = 0;
	//do we have a whole frame ?
	int complete_frame = 0;
	//how many frames have we decoded ?
	int nb_frames = 0;

	if(buf_size <= 0 || buffer == NULL)
		return 0;

	//parse the video packet. If the parser returns a frame, decode it.
	while(buf_size > 0) {
		//1. parse the newly-received packet. If the parser has assembled a whole frame, store it in the video_packet structure.
		//TODO: confirm or rule out the usefulness of frameOffset
		parsedLen = av_parser_parse2(cpContext, context, &video_packet.data, &video_packet.size, buffer, buf_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
		
		//2. modify our buffer's data offset to reflect the parser's progression.
		buffer += parsedLen;
		buf_size -= parsedLen;
		frameOffset += parsedLen;
		
		//3. do we have a frame to decode ?
		if(video_packet.size > 0) {
			//printf("Packet size : %d\n", video_packet.size);
			decodedLen = avcodec_decode_video2(context, current_frame, &complete_frame, &video_packet);
			if(decodedLen < 0) {
				fprintf(stderr, "Error : couldn't decode frame.\n");
				return -1; //match the documented contract: -1 on decoding error
			}
			//If we get there, we should've decoded a frame.
			if(complete_frame) {
				nb_frames++;
				//check if the video size has changed
				if(current_frame->width != current_width || current_frame->height != current_height)
					if(video_alloc_frame_buffer() < 0) {
						fprintf(stderr, "Error : couldn't allocate memory for decoding.\n");
						return -1;
					}
				//write the raw frame data in our temporary buffer...
				int picsize = avpicture_layout((const AVPicture*)current_frame, current_frame->format,
					current_frame->width, current_frame->height, tempBuffer, tempBufferSize);
				//...that we then pass to the processing callback.
				if(frame_processing_callback(tempBuffer, current_frame->width, current_frame->height, picsize) < 0)
					return -1;
				//printf("Decoded frame : %d bytes, format : %d, size : %dx%d\n", picsize, current_frame->format, current_frame->width, current_frame->height);
				//free the frame's references for reuse
				av_frame_unref(current_frame);
			}
			
			//reinit frame offset for next frame
			frameOffset = 0;
		}
	}
	return nb_frames;
}
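cpContext above is evidently an AVCodecParserContext and context an AVCodecContext, both initialized elsewhere. For completeness, a minimal setup sketch matching the API generation used in the example, under the assumption that the stream is H.264 (the example never names the codec):

AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);   // assumed codec
AVCodecContext *context = avcodec_alloc_context3(codec);
AVCodecParserContext *cpContext = av_parser_init(codec->id);
AVFrame *current_frame = av_frame_alloc();

avcodec_open2(context, codec, NULL);   // check return values in real code

// ... feed incoming buffers through video_decode_packet() ...

av_parser_close(cpContext);
avcodec_close(context);
av_free(context);
av_frame_free(&current_frame);
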
Example #5
static int raw_encode(AVCodecContext *avctx,
                            unsigned char *frame, int buf_size, void *data)
{
    int ret = avpicture_layout((AVPicture *)data, avctx->pix_fmt, avctx->width,
                                               avctx->height, frame, buf_size);

    if(avctx->codec_tag == AV_RL32("yuv2") && ret > 0 &&
       avctx->pix_fmt   == PIX_FMT_YUYV422) {
        int x;
        for(x = 1; x < avctx->height*avctx->width*2; x += 2)
            frame[x] ^= 0x80;
    }
    return ret;
}
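A note on the XOR loop above: QuickTime's 'yuv2' variant of packed 4:2:2 stores its chroma samples as signed values, so flipping bit 7 of every odd byte (the U and V samples in the Y U Y V byte order) converts the unsigned chroma produced by the codec into the signed representation the 'yuv2' tag expects.
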
Example #6
static void
mjpeg2rgb(char *MJPEG, int len, char *RGB, int NumPixels)
{
  int got_picture;

  memset(RGB, 0, avframe_rgb_size);

#if LIBAVCODEC_VERSION_MAJOR > 52
  int decoded_len;
  AVPacket avpkt;
  av_init_packet(&avpkt);
  
  avpkt.size = len;
  avpkt.data = (unsigned char*)MJPEG;
  decoded_len = avcodec_decode_video2(avcodec_context, avframe_camera, &got_picture, &avpkt);

  if (decoded_len < 0) {
      fprintf(stderr, "Error while decoding frame.\n");
      return;
  }
#else
  avcodec_decode_video(avcodec_context, avframe_camera, &got_picture, (uint8_t *) MJPEG, len);
#endif

  if (!got_picture) {
    fprintf(stderr,"Webcam: expected picture but didn't get it...\n");
    return;
  }

  int xsize = avcodec_context->width;
  int ysize = avcodec_context->height;
  int pic_size = avpicture_get_size(avcodec_context->pix_fmt, xsize, ysize);
  if (pic_size != avframe_camera_size) {
    fprintf(stderr,"outbuf size mismatch.  pic_size: %d bufsize: %d\n",pic_size,avframe_camera_size);
    return;
  }

  video_sws = sws_getContext( xsize, ysize, avcodec_context->pix_fmt, xsize, ysize, PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);
  sws_scale(video_sws, avframe_camera->data, avframe_camera->linesize, 0, ysize, avframe_rgb->data, avframe_rgb->linesize );
  sws_freeContext(video_sws);  

  int size = avpicture_layout((AVPicture *) avframe_rgb, PIX_FMT_RGB24, xsize, ysize, (uint8_t *)RGB, avframe_rgb_size);
  if (size != avframe_rgb_size) {
    fprintf(stderr,"webcam: avpicture_layout error: %d\n",size);
    return;
  }
}
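mjpeg2rgb builds and tears down a SwsContext on every frame even though the camera's geometry and formats never change. A sketch of the cheaper variant using sws_getCachedContext, with the example's globals (video_sws must start out NULL):

// sws_getCachedContext() returns the existing context unchanged when all
// parameters match, and reallocates it only when they differ.
video_sws = sws_getCachedContext(video_sws,
                                 xsize, ysize, avcodec_context->pix_fmt,
                                 xsize, ysize, PIX_FMT_RGB24,
                                 SWS_BILINEAR, NULL, NULL, NULL);
sws_scale(video_sws, avframe_camera->data, avframe_camera->linesize,
          0, ysize, avframe_rgb->data, avframe_rgb->linesize);
// sws_freeContext(video_sws) then belongs in the teardown path, not here.
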
Example #7
int H264Encoder::uyvy422ToYuv420p(unsigned char* src, int src_w, int src_h, unsigned char* dec, int dec_w, int dec_h) {
   if (src == NULL || dec == NULL) {
      return -1;
   }

   int one_uyvy_size = avpicture_get_size(AV_PIX_FMT_UYVY422, src_w, src_h);
   int one_yuv420p_size = avpicture_get_size(AV_PIX_FMT_YUV420P, dec_w, dec_h);

   int fill_size = avpicture_fill(&mAvInputPic, src, AV_PIX_FMT_UYVY422, src_w, src_h);
   int sws_rel = sws_scale(mSwsCt, mAvInputPic.data, mAvInputPic.linesize, 0, src_h,
                           mAvOutputPic.data, mAvOutputPic.linesize);

   // Use the computed YUV420P size rather than a hard-coded 1382400,
   // which only holds for 1280x720.
   int layout_rel = avpicture_layout(&mAvOutputPic, AV_PIX_FMT_YUV420P, dec_w, dec_h, dec, one_yuv420p_size);
   printf("Fill AV_PIX_FMT_UYVY422, size:[%d] (expected [%d]), sws_scale return:[%d], layout return:[%d]\n",
          fill_size, one_uyvy_size, sws_rel, layout_rel);
   return 0;
}
Example #8
static int render_rgb_to_buffer(char* buffer, int size) {
	int err;

	// Draw the frame to the buffer
	// Draw the frame to the buffer
	err = avpicture_layout((AVPicture*)rgb_frame,
		render_pix_fmt,
		decoder_ctx->width,
		decoder_ctx->height,
		(unsigned char*)buffer,
		size);
	if (err < 0) {
		__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC",
			"avpicture_layout failed");
		return 0;
	}
	}

	return 1;
}
Example #9
static SchroFrame *libschroedinger_frame_from_data(AVCodecContext *avccontext,
                                                   const AVFrame *frame)
{
    SchroEncoderParams *p_schro_params = avccontext->priv_data;
    SchroFrame *in_frame;
    /* Input line size may differ from what the codec supports. Especially
     * when transcoding from one format to another. So use avpicture_layout
     * to copy the frame. */
    in_frame = ff_create_schro_frame(avccontext, p_schro_params->frame_format);

    if (in_frame)
        avpicture_layout((const AVPicture *)frame, avccontext->pix_fmt,
                          avccontext->width, avccontext->height,
                          in_frame->components[0].data,
                          p_schro_params->frame_size);

    return in_frame;
}
Example #10
int nv_avc_get_raw_frame(char* buffer, int size) {
	AVFrame *our_yuv_frame;
	int err;

	our_yuv_frame = dequeue_new_frame();
	if (our_yuv_frame == NULL) {
		return 0;
	}

	err = avpicture_layout((AVPicture*)our_yuv_frame,
		decoder_ctx->pix_fmt,
		decoder_ctx->width,
		decoder_ctx->height,
		(unsigned char*)buffer,
		size);

	av_frame_free(&our_yuv_frame);

	return (err >= 0);
}
Example #11
//--------------------------------------------------------------------
void ofUCUtils::new_frame (unicap_data_buffer_t * buffer)
{
	if(!deviceReady)
		return;

	if(src_pix_fmt!=PIX_FMT_RGB24){
		avpicture_fill(src,buffer->data,src_pix_fmt,format.size.width,format.size.height);

		if(sws_scale(toRGB_convert_ctx,
			src->data, src->linesize, 0, buffer->format.size.height,
			dst->data, dst->linesize)<0)
				ofLog(OF_ERROR,"ofUCUtils: can't convert colorspaces");

		lock_buffer();
			avpicture_layout(dst,PIX_FMT_RGB24,d_width,d_height,pixels,d_width*d_height*3);

	}else{
		lock_buffer();
			pixels=buffer->data;
	}
	bUCFrameNew = true;
	unlock_buffer();
}
Example #12
static int raw_encode(AVCodecContext *avctx, AVPacket *pkt,
                      const AVFrame *frame, int *got_packet)
{
    int ret = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);

    if (ret < 0)
        return ret;

    if ((ret = ff_alloc_packet(pkt, ret)) < 0)
        return ret;
    if ((ret = avpicture_layout((const AVPicture *)frame, avctx->pix_fmt, avctx->width,
                                avctx->height, pkt->data, pkt->size)) < 0)
        return ret;

    if(avctx->codec_tag == AV_RL32("yuv2") && ret > 0 &&
       avctx->pix_fmt   == PIX_FMT_YUYV422) {
        int x;
        for(x = 1; x < avctx->height*avctx->width*2; x += 2)
            pkt->data[x] ^= 0x80;
    }
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #13
//--------------------------------------------------------------------
bool ofUCUtils::getFrameUC(unsigned char ** _pixels) {

	if ( !SUCCESS( unicap_queue_buffer( handle, &buffer ) )) {
		printf("Unicap : Failed to queue a buffer\n");
		return false;
	}
	/*
	 Wait until the image buffer is ready
	 */
	if ( !SUCCESS( unicap_wait_buffer( handle, &returned_buffer ) )) {
		printf("Unicap : Failed to wait for buffer\n");
		return false;
	}

	if(src_pix_fmt!=PIX_FMT_RGB24){
		avpicture_fill(src,returned_buffer->data,src_pix_fmt,format.size.width,format.size.height);
		img_convert(dst,PIX_FMT_RGB24,src,src_pix_fmt,format.size.width,format.size.height);
		avpicture_layout(dst,PIX_FMT_RGB24,format.size.width,format.size.height,*_pixels,format.size.width*format.size.height*3);
	}else{
		*_pixels = returned_buffer->data;
	}
	return true;

}
Example #14
static int raw_encode(AVCodecContext *avctx,
                            unsigned char *frame, int buf_size, void *data)
{
    return avpicture_layout((AVPicture *)data, avctx->pix_fmt, avctx->width,
                                               avctx->height, frame, buf_size);
}
Example #15
/**
* @brief Capture loop for a video4linux2 input: opens the device and codec,
*        decodes video frames and distributes them to attached consumers.
*
* @return non-zero if the stream has not ended
*/
int LocalVideoInput::run()
{
    AVInputFormat *inputFormat = av_find_input_format( "video4linux2" );
    if ( inputFormat == NULL)
        Fatal( "Can't load input format" );

#if 0
    AVProbeData probeData;
    probeData.filename = mSource.c_str();
    probeData.buf = new unsigned char[1024];
    probeData.buf_size = 1024;
    inputFormat = av_probe_input_format( &probeData, 0 );
    if ( inputFormat == NULL)
        Fatal( "Can't probe input format" );

    AVFormatParameters formatParameters ;
    memset( &formatParameters, 0, sizeof(formatParameters) );
    formatParameters.channels = 1;
    formatParameters.channel = 0;
    formatParameters.standard = "PAL";
    formatParameters.pix_fmt = PIX_FMT_RGB24;
    //formatParameters.time_base.num = 1;
    //formatParameters.time_base.den = 10;
    formatParameters.width = 352;
    formatParameters.height = 288;
    //formatParameters.prealloced_context = 1;
#endif

    /* New API */
    AVDictionary *opts = NULL;
    av_dict_set( &opts, "standard", "PAL", 0 );
    av_dict_set( &opts, "video_size", "320x240", 0 );
    av_dict_set( &opts, "channel", "0", 0 );
    av_dict_set( &opts, "pixel_format", "rgb24", 0 );
    //av_dict_set( &opts, "framerate", "10", 0 );
    avDumpDict( opts );

    int avError = 0;
    AVFormatContext *formatContext = NULL;
    //if ( av_open_input_file( &formatContext, mSource.c_str(), inputFormat, 0, &formatParameters ) !=0 )
    if ( (avError = avformat_open_input( &formatContext, mSource.c_str(), inputFormat, &opts )) < 0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), avStrError(avError) );

    avDumpDict( opts );
#if 0
    if ( av_open_input_stream( &formatContext, 0, mSource.c_str(), inputFormat, &formatParameters ) !=0 )
        Fatal( "Unable to open input %s due to: %s", mSource.c_str(), strerror(errno) );
#endif

    // Locate stream info from input
    if ( (avError = avformat_find_stream_info( formatContext, &opts )) < 0 )
        Fatal( "Unable to find stream info from %s due to: %s", mSource.c_str(), avStrError(avError) );
    
    if ( dbgLevel > DBG_INF )
        av_dump_format( formatContext, 0, mSource.c_str(), 0 );

    // Find first video stream present
    int videoStreamId = -1;
    for ( int i=0; i < formatContext->nb_streams; i++ )
    {
        if ( formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
        {
            videoStreamId = i;
            //set_context_opts( formatContext->streams[i]->codec, avcodec_opts[CODEC_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
            break;
        }
    }
    if ( videoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mSource.c_str() );
    mStream = formatContext->streams[videoStreamId];
    mCodecContext = mStream->codec;

    // Try and get the codec from the codec context
    AVCodec *codec = NULL;
    if ( (codec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mSource.c_str() );

    // Open the codec
    if ( avcodec_open2( mCodecContext, codec, &opts ) < 0 )
        Fatal( "Unable to open codec for video stream from %s", mSource.c_str() );

    //AVFrame *savedFrame = avcodec_alloc_frame();

    // Allocate space for the native video frame
    AVFrame *frame = avcodec_alloc_frame();

    // Determine required buffer size and allocate buffer
    int pictureSize = avpicture_get_size( mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height );
    
    ByteBuffer frameBuffer( pictureSize );
    
    //avpicture_fill( (AVPicture *)savedFrame, mLastFrame.mBuffer.data(), mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height);

    AVPacket packet;
    while( !mStop )
    {
        int frameComplete = false;
        while ( !frameComplete && (av_read_frame( formatContext, &packet ) >= 0) )
        {
            Debug( 5, "Got packet from stream %d", packet.stream_index );
            if ( packet.stream_index == videoStreamId )
            {
                frameComplete = false;
                if ( avcodec_decode_video2( mCodecContext, frame, &frameComplete, &packet ) < 0 )
                    Fatal( "Unable to decode frame at frame %ju", mFrameCount );

                Debug( 3, "Decoded video packet at frame %ju, pts %jd", mFrameCount, packet.pts );

                if ( frameComplete )
                {
                    Debug( 3, "Got frame %d, pts %jd (%.3f)", mCodecContext->frame_number, frame->pkt_pts, (((double)(packet.pts-mStream->start_time)*mStream->time_base.num)/mStream->time_base.den) );

                    avpicture_layout( (AVPicture *)frame, mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height, frameBuffer.data(), frameBuffer.capacity() );

                    uint64_t timestamp = packet.pts;
                    //Debug( 3, "%d: TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );
                    //Info( "%ld:TS: %lld, TS1: %lld, TS2: %lld, TS3: %.3f", time( 0 ), timestamp, packet.pts, ((1000000LL*packet.pts*mStream->time_base.num)/mStream->time_base.den), (((double)packet.pts*mStream->time_base.num)/mStream->time_base.den) );

                    VideoFrame *videoFrame = new VideoFrame( this, mCodecContext->frame_number, timestamp, frameBuffer );
                    distributeFrame( FramePtr( videoFrame ) );
                }
            }
            av_free_packet( &packet );
        }
        usleep( INTERFRAME_TIMEOUT );
    }
    cleanup();

    av_freep( &frame );
    if ( mCodecContext )
    {
       avcodec_close( mCodecContext );
       mCodecContext = NULL; // Freed by avformat_close_input
    }
    if ( formatContext )
    {
        avformat_close_input( &formatContext );
        formatContext = NULL;
        //av_free( formatContext );
    }
    return( !ended() );
}
Example #16
static tsk_size_t tdav_codec_h264_decode(tmedia_codec_t* self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
{
	tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
	const trtp_rtp_header_t* rtp_hdr = (const trtp_rtp_header_t*)proto_hdr;
	
	const uint8_t* pay_ptr = tsk_null;
	tsk_size_t pay_size = 0;
	int ret;
	tsk_bool_t sps_or_pps, append_scp, end_of_unit;
	tsk_size_t retsize = 0, size_to_copy = 0;
	static const tsk_size_t xmax_size = (3840 * 2160 * 3) >> 3; // >>3 instead of >>1 (not an error)
	static tsk_size_t start_code_prefix_size = sizeof(H264_START_CODE_PREFIX);
#if HAVE_FFMPEG
	int got_picture_ptr = 0;
#endif

	if(!h264 || !in_data || !in_size || !out_data
#if HAVE_FFMPEG
		|| !h264->decoder.context
#endif
		)
	{
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}
	
	//TSK_DEBUG_INFO("SeqNo=%hu", rtp_hdr->seq_num);

	/* Packet lost? */
	if((h264->decoder.last_seq + 1) != rtp_hdr->seq_num && h264->decoder.last_seq){
		TSK_DEBUG_INFO("[H.264] Packet loss, seq_num=%d", (h264->decoder.last_seq + 1));
	}
	h264->decoder.last_seq = rtp_hdr->seq_num;


	/* 5.3. NAL Unit Octet Usage
	  +---------------+
      |0|1|2|3|4|5|6|7|
      +-+-+-+-+-+-+-+-+
      |F|NRI|  Type   |
      +---------------+
	*/
	if(*((uint8_t*)in_data) & 0x80){
		TSK_DEBUG_WARN("F=1");
		/* reset accumulator */
		h264->decoder.accumulator_pos = 0;
		return 0;
	}

	/* get payload */
	if((ret = tdav_codec_h264_get_pay(in_data, in_size, (const void**)&pay_ptr, &pay_size, &append_scp, &end_of_unit)) || !pay_ptr || !pay_size){
		TSK_DEBUG_ERROR("Depayloader failed to get H.264 content");
		return 0;
	}
	//append_scp = tsk_true;
	size_to_copy = pay_size + (append_scp ? start_code_prefix_size : 0);
	// whether it's SPS or PPS (append_scp is false for subsequent FUA chunks)
	sps_or_pps = append_scp && pay_ptr && ((pay_ptr[0] & 0x1F) == 7 || (pay_ptr[0] & 0x1F) == 8);
	
	// start-accumulator
	if(!h264->decoder.accumulator){
		if(size_to_copy > xmax_size){
			TSK_DEBUG_ERROR("%u too big to contain valid encoded data. xmax_size=%u", size_to_copy, xmax_size);
			return 0;
		}
		if(!(h264->decoder.accumulator = tsk_calloc(size_to_copy, sizeof(uint8_t)))){
			TSK_DEBUG_ERROR("Failed to allocated new buffer");
			return 0;
		}
		h264->decoder.accumulator_size = size_to_copy;
	}
	if((h264->decoder.accumulator_pos + size_to_copy) >= xmax_size){
		TSK_DEBUG_ERROR("BufferOverflow");
		h264->decoder.accumulator_pos = 0;
		return 0;
	}
	if((h264->decoder.accumulator_pos + size_to_copy) > h264->decoder.accumulator_size){
		if(!(h264->decoder.accumulator = tsk_realloc(h264->decoder.accumulator, (h264->decoder.accumulator_pos + size_to_copy)))){
			TSK_DEBUG_ERROR("Failed to reallocated new buffer");
			h264->decoder.accumulator_pos = 0;
			h264->decoder.accumulator_size = 0;
			return 0;
		}
		h264->decoder.accumulator_size = (h264->decoder.accumulator_pos + size_to_copy);
	}

	if(append_scp){
		memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], H264_START_CODE_PREFIX, start_code_prefix_size);
		h264->decoder.accumulator_pos += start_code_prefix_size;
	}
	memcpy(&((uint8_t*)h264->decoder.accumulator)[h264->decoder.accumulator_pos], pay_ptr, pay_size);
	h264->decoder.accumulator_pos += pay_size;
	// end-accumulator

	if(sps_or_pps){
		// http://libav-users.943685.n4.nabble.com/Decode-H264-streams-how-to-fill-AVCodecContext-from-SPS-PPS-td2484472.html
		// SPS and PPS should be bundled with IDR
		TSK_DEBUG_INFO("Receiving SPS or PPS ...to be tied to an IDR");
	}
	else if(rtp_hdr->marker){
		if(h264->decoder.passthrough){
			if(*out_max_size < h264->decoder.accumulator_pos){
				if((*out_data = tsk_realloc(*out_data, h264->decoder.accumulator_pos))){
					*out_max_size = h264->decoder.accumulator_pos;
				}
				else{
					*out_max_size = 0;
					return 0;
				}
			}
			memcpy(*out_data, h264->decoder.accumulator, h264->decoder.accumulator_pos);
			retsize = h264->decoder.accumulator_pos;
		}
		else { // !h264->decoder.passthrough
#if HAVE_FFMPEG
			AVPacket packet;

			/* decode the picture */
			av_init_packet(&packet);
			packet.dts = packet.pts = AV_NOPTS_VALUE;
			packet.size = (int)h264->decoder.accumulator_pos;
			packet.data = h264->decoder.accumulator;
			ret = avcodec_decode_video2(h264->decoder.context, h264->decoder.picture, &got_picture_ptr, &packet);

			if(ret < 0){
				TSK_DEBUG_INFO("Failed to decode the buffer with error code =%d, size=%u, append=%s", ret, h264->decoder.accumulator_pos, append_scp ? "yes" : "no");
				if(TMEDIA_CODEC_VIDEO(self)->in.callback){
					TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
					TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
					TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
				}
			}
			else if(got_picture_ptr){
				tsk_size_t xsize;
			
				/* IDR ? */
				if(((pay_ptr[0] & 0x1F) == 0x05) && TMEDIA_CODEC_VIDEO(self)->in.callback){
					TSK_DEBUG_INFO("Decoded H.264 IDR");
					TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_idr;
					TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
					TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
				}
				/* fill out */
				xsize = avpicture_get_size(h264->decoder.context->pix_fmt, h264->decoder.context->width, h264->decoder.context->height);
				if(*out_max_size<xsize){
					if((*out_data = tsk_realloc(*out_data, (xsize + FF_INPUT_BUFFER_PADDING_SIZE)))){
						*out_max_size = xsize;
					}
					else{
						*out_max_size = 0;
						return 0;
					}
				}
				retsize = xsize;
				TMEDIA_CODEC_VIDEO(h264)->in.width = h264->decoder.context->width;
				TMEDIA_CODEC_VIDEO(h264)->in.height = h264->decoder.context->height;
				avpicture_layout((AVPicture *)h264->decoder.picture, h264->decoder.context->pix_fmt, (int)h264->decoder.context->width, (int)h264->decoder.context->height,
						*out_data, (int)retsize);
			}
#endif /* HAVE_FFMPEG */
		} // else(h264->decoder.passthrough)

		h264->decoder.accumulator_pos = 0;
	} // else if(rtp_hdr->marker)

	return retsize;
}
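The depayloader above repeatedly masks pay_ptr[0] with 0x1F to classify NAL units. For reference, the one-byte NAL unit header defined by H.264 (and used by RFC 6184 packetization), written as small helpers independent of the doubango types:

/* H.264 NAL unit header: |F(1)|NRI(2)|Type(5)| */
#define NAL_TYPE_IDR 5
#define NAL_TYPE_SPS 7
#define NAL_TYPE_PPS 8

static int nal_forbidden_bit(uint8_t b) { return (b >> 7) & 0x01; } /* must be 0 */
static int nal_ref_idc(uint8_t b)       { return (b >> 5) & 0x03; }
static int nal_unit_type(uint8_t b)     { return b & 0x1F; }
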
Example #17
QList<QbPacket> VideoStream::readPackets(AVPacket *packet)
{
    QList<QbPacket> packets;

    if (!this->isValid())
        return packets;

    AVFrame iFrame;
    avcodec_get_frame_defaults(&iFrame);

    int gotFrame;

    avcodec_decode_video2(this->codecContext(),
                          &iFrame,
                          &gotFrame,
                          packet);

    if (!gotFrame)
        return packets;

    int frameSize = avpicture_get_size(this->codecContext()->pix_fmt,
                                       this->codecContext()->width,
                                       this->codecContext()->height);

    QSharedPointer<uchar> oBuffer(new uchar[frameSize]);

    if (!oBuffer)
        return packets;

    static bool sync;

    if (this->m_fst)
    {
        sync = (av_frame_get_best_effort_timestamp(&iFrame) == 0);
        this->m_pts = 0;
        this->m_duration = this->fps().invert().value() * this->timeBase().invert().value();
        this->m_fst = false;
    }
    else
        this->m_pts += this->m_duration;

    avpicture_layout((AVPicture *) &iFrame,
                     this->codecContext()->pix_fmt,
                     this->codecContext()->width,
                     this->codecContext()->height,
                     (uint8_t *) oBuffer.data(),
                     frameSize);

    QbCaps caps = this->caps();
    caps.setProperty("sync", sync);

    QbPacket oPacket(caps,
                     oBuffer,
                     frameSize);

    oPacket.setPts(this->m_pts);
    oPacket.setDuration(this->m_duration);
    oPacket.setTimeBase(this->timeBase());
    oPacket.setIndex(this->index());

    packets << oPacket;

    return packets;
}
Example #18
int main(int argc,char ** argv)
{
        int  write_fd,ret,videoStream;
        AVFormatContext * formatContext=NULL;
        AVCodec * codec;
        AVCodecContext * codecContext;
        AVFrame * decodedFrame;
        AVPacket packet;
        uint8_t *decodedBuffer;
        unsigned int decodedBufferSize;
        int finishedFrame;

      
        // Initialize libavformat/libavcodec and register all formats and codecs
        av_register_all();

        write_fd = open(OUTPUT_FILE_NAME,O_RDWR | O_CREAT,0666);
        if(write_fd<0){
                perror("open");
                exit(1);
        }
        printf("input ");

        ret = avformat_open_input(&formatContext, INPUT_FILE_NAME, NULL,NULL);
        if(ret<0)
                error_handle("avformat_open_input error");

        ret = avformat_find_stream_info(formatContext,NULL);
        if(ret<0)
                error_handle("av_find_stream_info");

        // Print detailed information about the input file
        av_dump_format(formatContext, 0, INPUT_FILE_NAME, 0);


        videoStream = 0;
        codecContext = formatContext->streams[videoStream]->codec;

        codec = avcodec_find_decoder(AV_CODEC_ID_H264);
        if(codec == NULL)
                error_handle("avcodec_find_decoder error!\n");

        ret = avcodec_open2(codecContext,codec,NULL);
        if(ret<0)
                error_handle("avcodec_open2");

        // Allocate the AVFrame that receives decoded video
        decodedFrame = avcodec_alloc_frame();
        if(!decodedFrame)
                error_handle("avcodec_alloc_frame!");

        // Allocate the output buffer for the raw decoded picture
        decodedBufferSize = avpicture_get_size(DECODED_OUTPUT_FORMAT,IMAGE_WIDTH,IMAGE_HEIGHT);
        decodedBuffer = (uint8_t *)malloc(decodedBufferSize);
        if(!decodedBuffer)
                error_handle("malloc decodedBuffer error!");

        av_init_packet(&packet);
        int count = 0;
        while (av_read_frame(formatContext, &packet) >= 0) {
                ret = avcodec_decode_video2(codecContext, decodedFrame, &finishedFrame, &packet);
                if (ret < 0)
                        error_handle("avcodec_decode_video2 error!");
                if (finishedFrame) {
                        count++;
                        avpicture_layout((AVPicture*)decodedFrame, DECODED_OUTPUT_FORMAT, IMAGE_WIDTH, IMAGE_HEIGHT, decodedBuffer, decodedBufferSize);
                        ret = write(write_fd, decodedBuffer, decodedBufferSize);
                        if (ret < 0)
                                error_handle("write yuv stream error!");
                }

                av_free_packet(&packet);
#if DEC_ONE_YUV
                if (count == 1) {
                        printf("decoded one yuv frame\n");
                        break;
                }
#endif
        }

#if !DEC_ONE_YUV
        /* Flush the decoder so that frames buffered inside it are not
           dropped when the input reaches EOF */
        while (1) {
                packet.data = NULL;
                packet.size = 0;
                ret = avcodec_decode_video2(codecContext, decodedFrame, &finishedFrame, &packet);
                if (ret <= 0 && finishedFrame <= 0)
                        break;
                if (finishedFrame) {
                        avpicture_layout((AVPicture*)decodedFrame, DECODED_OUTPUT_FORMAT, IMAGE_WIDTH, IMAGE_HEIGHT, decodedBuffer, decodedBufferSize);
                        ret = write(write_fd, decodedBuffer, decodedBufferSize);
                        if (ret < 0)
                                error_handle("write yuv stream error!");
                }

                av_free_packet(&packet);
        }
#endif


        // Close the codec before the format context: codecContext points into
        // formatContext->streams, which avformat_close_input() frees.
        avcodec_close(codecContext);
        avformat_close_input(&formatContext);
        av_free(decodedFrame);
        free(decodedBuffer);
        close(write_fd);

        return 0;
}
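The flush loop above drains the decoder by feeding avcodec_decode_video2 an empty packet. On the post-3.1 send/receive API the same drain is expressed as below; a sketch with assumed ctx/frame variables:

// Signal EOF with a NULL packet, then pull out buffered frames until
// the decoder reports AVERROR_EOF.
static int drain_decoder(AVCodecContext *ctx, AVFrame *frame)
{
    int ret = avcodec_send_packet(ctx, NULL);   // enter draining mode
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_frame(ctx, frame)) >= 0) {
        /* write the frame out here */
        av_frame_unref(frame);
    }
    return ret == AVERROR_EOF ? 0 : ret;
}
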
Example #19
QbPacket VideoStream::convert(AVFrame *iFrame)
{
    AVPicture *oPicture;
    AVPixelFormat oFormat;
    bool delFrame = false;

    if (outputFormats->contains(AVPixelFormat(iFrame->format))) {
        oPicture = (AVPicture *) iFrame;
        oFormat = AVPixelFormat(iFrame->format);
    }
    else {
        oPicture = new AVPicture;
        oFormat = AV_PIX_FMT_BGRA;

        avpicture_alloc(oPicture,
                        oFormat,
                        iFrame->width,
                        iFrame->height);

        this->m_scaleContext = sws_getCachedContext(this->m_scaleContext,
                                                    iFrame->width,
                                                    iFrame->height,
                                                    AVPixelFormat(iFrame->format),
                                                    iFrame->width,
                                                    iFrame->height,
                                                    oFormat,
                                                    SWS_FAST_BILINEAR,
                                                    NULL,
                                                    NULL,
                                                    NULL);

        sws_scale(this->m_scaleContext,
                  (uint8_t **) iFrame->data,
                  iFrame->linesize,
                  0,
                  iFrame->height,
                  oPicture->data,
                  oPicture->linesize);

        delFrame = true;
    }

    QbVideoPacket packet;
    packet.caps().isValid() = true;
    packet.caps().format() = outputFormats->value(oFormat);
    packet.caps().width() = iFrame->width;
    packet.caps().height() = iFrame->height;
    packet.caps().fps() = this->fps();

    int frameSize = avpicture_get_size(oFormat,
                                       iFrame->width,
                                       iFrame->height);

    QbBufferPtr oBuffer(new char[frameSize]);

    avpicture_layout(oPicture,
                     oFormat,
                     iFrame->width,
                     iFrame->height,
                     (uint8_t *) oBuffer.data(),
                     frameSize);

    packet.buffer() = oBuffer;
    packet.bufferSize() = frameSize;
    packet.pts() = av_frame_get_best_effort_timestamp(iFrame);
    packet.timeBase() = this->timeBase();
    packet.index() = this->index();
    packet.id() = this->id();

    if (delFrame) {
        avpicture_free(oPicture);
        delete oPicture;
    }

    return packet.toPacket();
}
Example #20
void H264CubemapSource::getNextCubemapLoop()
{
    int64_t lastPTS = 0;
    boost::chrono::system_clock::time_point lastDisplayTime(boost::chrono::microseconds(0));
    
    while (true)
    {
        size_t pendingCubemaps;
        int frameSeqNum;
        // Get frames with the oldest frame seq # and remove the associated bucket
        std::vector<AVFrame*> frames;
        {
            boost::mutex::scoped_lock lock(frameMapMutex);
            
            // loop (not `if`) so spurious wakeups re-check the predicate
            while (frameMap.size() < 30)
            {
                frameMapCondition.wait(lock);
            }
            pendingCubemaps = frameMap.size();
            
            auto it = frameMap.begin();
            frameSeqNum = it->first;
            lastFrameSeqNum = frameSeqNum;
            frames = it->second;
            frameMap.erase(it);
        }
        
        StereoCubemap* cubemap;
        
        // Allocate cubemap if necessary
        if (!oldCubemap)
        {
            int width = 0, height = 0;   // stay 0 if every frame in the bucket is null
            for (AVFrame* frame : frames)
            {
                if (frame)
                {
                    width  = frame->width;
                    height = frame->height;
                    break;
                }
            }
            
            std::vector<Cubemap*> eyes;
            for (int j = 0, faceIndex = 0; j < StereoCubemap::MAX_EYES_COUNT && faceIndex < sinks.size(); j++)
            {
                std::vector<CubemapFace*> faces;
                for (int i = 0; i < Cubemap::MAX_FACES_COUNT && faceIndex < sinks.size(); i++, faceIndex++)
                {
                    Frame* content = Frame::create(width,
                                                   height,
                                                   format,
                                                   boost::chrono::system_clock::time_point(),
                                                   heapAllocator);
                    
                    CubemapFace* face = CubemapFace::create(content,
                                                            i,
                                                            heapAllocator);
                    
                    faces.push_back(face);
                }
                
                eyes.push_back(Cubemap::create(faces, heapAllocator));
            }
            
            cubemap = StereoCubemap::create(eyes, heapAllocator);
        }
        else
        {
            cubemap = oldCubemap;
        }
        
        size_t count = 0;
        // Fill cubemap making sure stereo pairs match
        for (int i = 0; i < (std::min)(frames.size(), (size_t)CUBEMAP_MAX_FACES_COUNT); i++)
        {
            AVFrame*     leftFrame = frames[i];
            CubemapFace* leftFace  = cubemap->getEye(0)->getFace(i, true);
            
            AVFrame*     rightFrame = nullptr;
            CubemapFace* rightFace  = nullptr;
            
            if (frames.size() > i + CUBEMAP_MAX_FACES_COUNT)
            {
                rightFrame = frames[i+CUBEMAP_MAX_FACES_COUNT];
                rightFace  = cubemap->getEye(1)->getFace(i, true);
            }
            
            if (matchStereoPairs && frames.size() > i + CUBEMAP_MAX_FACES_COUNT)
            {
                // check if matched
                if (!leftFrame || !rightFrame)
                {
                    // if they don't match give them back and forget about them
                    sinks[i]->returnFrame(leftFrame);
                    sinks[i + CUBEMAP_MAX_FACES_COUNT]->returnFrame(rightFrame);
                    leftFrame  = nullptr;
                    rightFrame = nullptr;
                }
            }
            
            // Fill the cubemapfaces with pixels if pixels are available
            if (leftFrame)
            {
                count++;
                leftFace->setNewFaceFlag(true);
                avpicture_layout((AVPicture*)leftFrame, (AVPixelFormat)leftFrame->format,
                                 leftFrame->width, leftFrame->height,
                                 (unsigned char*)leftFace->getContent()->getPixels(), leftFace->getContent()->getWidth() * leftFace->getContent()->getHeight() * 4);
                sinks[i]->returnFrame(leftFrame);
                if (onScheduledFrameInCubemap) onScheduledFrameInCubemap(this, i);
            }
            else
            {
                leftFace->setNewFaceFlag(false);
            }
            
            if (rightFrame)
            {
                count++;
                rightFace->setNewFaceFlag(true);
                avpicture_layout((AVPicture*)rightFrame, (AVPixelFormat)rightFrame->format,
                                 rightFrame->width, rightFrame->height,
                                 (unsigned char*)rightFace->getContent()->getPixels(), rightFace->getContent()->getWidth() * rightFace->getContent()->getHeight() * 4);
                sinks[i + CUBEMAP_MAX_FACES_COUNT]->returnFrame(rightFrame);
                if (onScheduledFrameInCubemap) onScheduledFrameInCubemap(this, i+CUBEMAP_MAX_FACES_COUNT);
            }
            else if (rightFace)
            {
                rightFace->setNewFaceFlag(false);
            }
        }
        
        // Give it to the user of this library (AlloPlayer etc.)
        if (onNextCubemap)
        {
            // calculate PTS for the cubemap (median of the individual faces' PTS)
            boost::accumulators::accumulator_set<int64_t, boost::accumulators::features<boost::accumulators::tag::median> > acc;
            for (AVFrame* frame : frames)
            {
                if (frame)
                {
                    acc(frame->pts);
                }
            }
            int64_t pts = boost::accumulators::median(acc);
            
            // Calculate the time interval until this cubemap should be displayed
            if (lastPTS == 0)
            {
                lastPTS = pts;
            }
            
            if (lastDisplayTime.time_since_epoch().count() == 0)
            {
                lastDisplayTime = boost::chrono::system_clock::now();
                lastDisplayTime += boost::chrono::seconds(5);
            }
            int64_t ptsDiff = pts - lastPTS;   // signed: pts may step backwards
            lastDisplayTime += boost::chrono::microseconds(ptsDiff);
            lastPTS = pts;
            
            boost::chrono::milliseconds sleepDuration = boost::chrono::duration_cast<boost::chrono::milliseconds>(lastDisplayTime - boost::chrono::system_clock::now());
            
            // Wait until frame should be displayed
            //boost::this_thread::sleep_for(sleepDuration);
            
            // Display frame
            oldCubemap = onNextCubemap(this, cubemap);
		}
    }
}
Example #21
tsk_size_t tdav_codec_mp4ves_decode(tmedia_codec_t* _self, const void* in_data, tsk_size_t in_size, void** out_data, tsk_size_t* out_max_size, const tsk_object_t* proto_hdr)
{ 
	tdav_codec_mp4ves_t* self = (tdav_codec_mp4ves_t*)_self;
	const trtp_rtp_header_t* rtp_hdr = proto_hdr;

	tsk_size_t xsize, retsize = 0;
	int got_picture_ptr;
	int ret;

	if(!self || !in_data || !in_size || !out_data || !self->decoder.context){
		TSK_DEBUG_ERROR("Invalid parameter");
		return 0;
	}

	// get expected size
	xsize = avpicture_get_size(self->decoder.context->pix_fmt, self->decoder.context->width, self->decoder.context->height);

	/* Packet lost? */
	if(self->decoder.last_seq != (rtp_hdr->seq_num - 1) && self->decoder.last_seq){
		if(self->decoder.last_seq == rtp_hdr->seq_num){
			// Could happen on some stupid emulators
			TSK_DEBUG_INFO("Packet duplicated, seq_num=%d", rtp_hdr->seq_num);
			return 0;
		}
		TSK_DEBUG_INFO("Packet lost, seq_num=%d", rtp_hdr->seq_num);
	}
	self->decoder.last_seq = rtp_hdr->seq_num;

	if((self->decoder.accumulator_pos + in_size) <= xsize){
		memcpy(&((uint8_t*)self->decoder.accumulator)[self->decoder.accumulator_pos], in_data, in_size);
		self->decoder.accumulator_pos += in_size;
	}
	else{
		TSK_DEBUG_WARN("Buffer overflow");
		self->decoder.accumulator_pos = 0;
		return 0;
	}

	if(rtp_hdr->marker){
		AVPacket packet;
		/* allocate destination buffer */
		if(*out_max_size < xsize){
			if(!(*out_data = tsk_realloc(*out_data, xsize))){
				TSK_DEBUG_ERROR("Failed to allocate new buffer");
				self->decoder.accumulator_pos = 0;
				*out_max_size = 0;
				return 0;
			}
			*out_max_size = xsize;
		}		

		av_init_packet(&packet);
		packet.size = (int)self->decoder.accumulator_pos;
		packet.data = self->decoder.accumulator;
		ret = avcodec_decode_video2(self->decoder.context, self->decoder.picture, &got_picture_ptr, &packet);

		if(ret < 0){
			TSK_DEBUG_WARN("Failed to decode the buffer with error code = %d", ret);
			if(TMEDIA_CODEC_VIDEO(self)->in.callback){
				TMEDIA_CODEC_VIDEO(self)->in.result.type = tmedia_video_decode_result_type_error;
				TMEDIA_CODEC_VIDEO(self)->in.result.proto_hdr = proto_hdr;
				TMEDIA_CODEC_VIDEO(self)->in.callback(&TMEDIA_CODEC_VIDEO(self)->in.result);
			}
		}
		else if(got_picture_ptr){
			retsize = xsize;
			TMEDIA_CODEC_VIDEO(self)->in.width = self->decoder.context->width;
			TMEDIA_CODEC_VIDEO(self)->in.height = self->decoder.context->height;

			/* copy picture into a linear buffer */
			avpicture_layout((AVPicture *)self->decoder.picture, self->decoder.context->pix_fmt, (int)self->decoder.context->width, (int)self->decoder.context->height,
				*out_data, (int)retsize);
		}
		/* in all cases: reset accumulator */
		self->decoder.accumulator_pos = 0;		
	}

	return retsize;
}
Example #22
static int libdirac_encode_frame(AVCodecContext *avccontext,
                                 unsigned char *frame,
                                 int buf_size, void *data)
{
    int enc_size = 0;
    dirac_encoder_state_t state;
    FfmpegDiracEncoderParams* p_dirac_params = avccontext->priv_data;
    FfmpegDiracSchroEncodedFrame* p_frame_output = NULL;
    FfmpegDiracSchroEncodedFrame* p_next_output_frame = NULL;
    int go = 1;

    if (data == NULL) {
        /* push end of sequence if not already signalled */
        if (!p_dirac_params->eos_signalled) {
            dirac_encoder_end_sequence( p_dirac_params->p_encoder );
            p_dirac_params->eos_signalled = 1;
        }
    } else {

        /* Allocate frame data to Dirac input buffer.
         * Input line size may differ from what the codec supports,
         * especially when transcoding from one format to another.
         * So use avpicture_layout to copy the frame. */
        avpicture_layout ((AVPicture *)data, avccontext->pix_fmt,
                          avccontext->width, avccontext->height,
                          p_dirac_params->p_in_frame_buf,
                          p_dirac_params->frame_size);

        /* load next frame */
        if (dirac_encoder_load (p_dirac_params->p_encoder,
                                p_dirac_params->p_in_frame_buf,
                                p_dirac_params->frame_size ) < 0) {
            av_log(avccontext, AV_LOG_ERROR, "Unrecoverable Encoder Error."
                   " dirac_encoder_load failed...\n");
            return -1;
        }
    }

    if (p_dirac_params->eos_pulled)
        go = 0;

    while(go) {
        p_dirac_params->p_encoder->enc_buf.buffer = frame;
        p_dirac_params->p_encoder->enc_buf.size   = buf_size;
        /* process frame */
        state = dirac_encoder_output ( p_dirac_params->p_encoder );

        switch (state)
        {
        case ENC_STATE_AVAIL:
        case ENC_STATE_EOS:
            assert (p_dirac_params->p_encoder->enc_buf.size > 0);
            /* create output frame */
            p_frame_output = av_mallocz(sizeof(FfmpegDiracSchroEncodedFrame));
            /* set output data */
            p_frame_output->p_encbuf =
                         av_malloc(p_dirac_params->p_encoder->enc_buf.size);

            memcpy(p_frame_output->p_encbuf,
                   p_dirac_params->p_encoder->enc_buf.buffer,
                   p_dirac_params->p_encoder->enc_buf.size);

            p_frame_output->size = p_dirac_params->p_encoder->enc_buf.size;

            p_frame_output->frame_num =
                            p_dirac_params->p_encoder->enc_pparams.pnum;

            if (p_dirac_params->p_encoder->enc_pparams.ptype == INTRA_PICTURE &&
                p_dirac_params->p_encoder->enc_pparams.rtype == REFERENCE_PICTURE)
                p_frame_output->key_frame = 1;

            ff_dirac_schro_queue_push_back (&p_dirac_params->enc_frame_queue,
                                            p_frame_output);

            if (state == ENC_STATE_EOS) {
                p_dirac_params->eos_pulled = 1;
                go = 0;
            }
            break;

        case ENC_STATE_BUFFER:
            go = 0;
            break;

        case ENC_STATE_INVALID:
            av_log(avccontext, AV_LOG_ERROR,
                   "Unrecoverable Dirac Encoder Error. Quitting...\n");
            return -1;

        default:
            av_log(avccontext, AV_LOG_ERROR, "Unknown Dirac Encoder state\n");
            return -1;
        }
    }

    /* copy 'next' frame in queue */
    p_next_output_frame =
          ff_dirac_schro_queue_pop(&p_dirac_params->enc_frame_queue);

    if (p_next_output_frame == NULL)
        return 0;

    memcpy(frame, p_next_output_frame->p_encbuf, p_next_output_frame->size);
    avccontext->coded_frame->key_frame = p_next_output_frame->key_frame;
    /* Use the frame number of the encoded frame as the pts. It is OK to do
     * so since Dirac is a constant framerate codec. It expects input to be
     * of constant framerate. */
    avccontext->coded_frame->pts = p_next_output_frame->frame_num;
    enc_size = p_next_output_frame->size;

    /* free frame */
    DiracFreeFrame(p_next_output_frame);

    return enc_size;
}
Example #23
int JPG_to_Pixel(const unsigned char *jpgBuff, int jpgSize, int pixelFmt, int pixelWidth, int pixelHeight, unsigned char *pixelBuff, int *pixelSize) {	
	AVFormatContext *formatContext;
	AVInputFormat *inputFormat;
	AVIOContext *ioContext;
	AVStream *stream;
	AVCodecContext *codecContext;
	AVCodec *codec;
	AVFrame *frame, *frame2;
	AVPacket packet;
	struct SwsContext *swsContext;
	int streamIndex;
	int gotFrame;
	int codecRet;
	int result = -1;

	av_register_all();
	formatContext = avformat_alloc_context();
	ioContext = avio_alloc_context((unsigned char *)jpgBuff, jpgSize, 0, NULL, NULL, NULL, NULL);
	inputFormat = av_find_input_format("mjpeg");
	av_probe_input_buffer2(ioContext, &inputFormat, NULL, NULL, 0, 0);
	formatContext->pb = ioContext;
	formatContext->iformat = inputFormat;
	avformat_open_input(&formatContext, NULL, NULL, NULL);
	avformat_find_stream_info(formatContext, NULL);

	av_init_packet(&packet);
	for (streamIndex = 0; streamIndex < formatContext->nb_streams; streamIndex++) {
		av_read_frame(formatContext, &packet);
		if (formatContext->streams[streamIndex]->codec->codec_type == AVMEDIA_TYPE_VIDEO && 0 < packet.size) {
			stream = formatContext->streams[streamIndex];
			codecContext = stream->codec;
			codec = avcodec_find_decoder(codecContext->codec_id);
			avcodec_open2(codecContext, codec, NULL);
			frame = avcodec_alloc_frame();
			codecRet = avcodec_decode_video2(codecContext, frame, &gotFrame, &packet);
			if (0 <= codecRet && 1 == gotFrame) {
				frame2 = av_frame_clone(frame);
				frame2->format = PF(pixelFmt);
				swsContext = sws_getContext(codecContext->width, codecContext->height, codecContext->pix_fmt, pixelWidth, pixelHeight, (AVPixelFormat)frame2->format, SWS_BICUBIC, NULL, NULL, NULL);   
				sws_scale(swsContext, (const uint8_t *const *)frame->data, frame->linesize, 0, codecContext->height, frame2->data, frame2->linesize);  
				sws_freeContext(swsContext);

				*pixelSize = avpicture_layout((const AVPicture *)frame2, (enum AVPixelFormat)frame2->format, pixelWidth, pixelHeight, pixelBuff, *pixelSize);
				result = *pixelSize;

				av_frame_free(&frame2);
			}	
			if (1 == codecContext->refcounted_frames) av_frame_unref(frame); 
			avcodec_free_frame(&frame);
			avcodec_close(codecContext);
		}
		av_free_packet(&packet);
		if (-1 != result)
			break;
	}

	avformat_close_input(&formatContext);
	av_free(ioContext->buffer);
	av_free(ioContext);
	avformat_free_context(formatContext);
	return result;
}
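One caveat in JPG_to_Pixel: the final av_free(ioContext->buffer) is only safe if the buffer handed to avio_alloc_context came from av_malloc, yet here it aliases the caller's jpgBuff. A common way to decouple ownership, sketched with hypothetical names, is an av_malloc'd scratch buffer plus a read callback over the user's memory:

typedef struct MemReader {
    const uint8_t *data;
    int size;
    int pos;
} MemReader;

// avio read callback: copy up to buf_size bytes out of the memory blob.
static int mem_read(void *opaque, uint8_t *buf, int buf_size)
{
    MemReader *r = (MemReader *)opaque;
    int n = r->size - r->pos;
    if (n <= 0)
        return AVERROR_EOF;   /* 0 on very old FFmpeg versions */
    if (n > buf_size)
        n = buf_size;
    memcpy(buf, r->data + r->pos, n);
    r->pos += n;
    return n;
}

// Usage sketch: the scratch buffer is owned (and freed) via the AVIOContext.
// MemReader reader = { jpgBuff, jpgSize, 0 };
// unsigned char *scratch = av_malloc(4096);
// AVIOContext *io = avio_alloc_context(scratch, 4096, 0, &reader, mem_read, NULL, NULL);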