Example #1
static int dec_h264(struct vidcodec_st *st, struct vidframe *frame,
		    bool eof, struct mbuf *src)
{
	int err;

	if (!src)
		return 0;

	err = h264_decode(st, src);
	if (err)
		return err;

	return ffdecode(st, frame, eof, src);
}
Example #2
File: decode.c  Project: QXIP/baresip
int decode_h264(struct viddec_state *st, struct vidframe *frame,
		bool eof, uint16_t seq, struct mbuf *src)
{
	int err;

	(void)seq;

	if (!src)
		return 0;

	err = h264_decode(st, src);
	if (err)
		return err;

	return ffdecode(st, frame, eof, src);
}
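Both wrappers above follow the same pattern: return immediately when no buffer was delivered, let h264_decode() report any parse error, and only then hand the frame to ffdecode() for the actual decode. The sketch below reproduces that control flow as a self-contained program; the struct fields and stub behaviours are stand-in assumptions for illustration only, not the real baresip/libre definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the baresip/libre types used by the wrappers above
 * (assumed shapes, not the real definitions). */
struct mbuf { size_t end; };          /* pretend "bytes available"    */
struct viddec_state { int nframes; }; /* pretend decoder state        */
struct vidframe { int filled; };      /* pretend decoded output frame */

/* stub: pretend to parse the H.264 payload, failing on empty input */
static int h264_decode(struct viddec_state *st, struct mbuf *src)
{
	(void)st;
	return src->end ? 0 : 22;
}

/* stub: pretend to run the actual decode step */
static int ffdecode(struct viddec_state *st, struct vidframe *frame,
		    bool eof, struct mbuf *src)
{
	(void)eof;
	(void)src;
	st->nframes++;
	frame->filled = 1;
	return 0;
}

/* Same shape as Example #2: ignore the sequence number, skip NULL input,
 * propagate any parse error, otherwise delegate to the decoder. */
static int decode_h264(struct viddec_state *st, struct vidframe *frame,
		       bool eof, uint16_t seq, struct mbuf *src)
{
	int err;

	(void)seq;

	if (!src)
		return 0;

	err = h264_decode(st, src);
	if (err)
		return err;

	return ffdecode(st, frame, eof, src);
}

int main(void)
{
	struct viddec_state st = { 0 };
	struct vidframe frame = { 0 };
	struct mbuf pkt = { 1024 };
	int err = decode_h264(&st, &frame, true, 0, &pkt);

	printf("decode_h264 -> %d (frames decoded: %d)\n", err, st.nframes);
	return 0;
}

On the happy path this prints "decode_h264 -> 0 (frames decoded: 1)", i.e. the call falls through to the decoder exactly once.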
Example #3
/*
 * decode video stream (from raw_frame to the yuv frame buffer (yu12/I420 format))
 * args:
 *    vd - pointer to device data
 *    frame - pointer to frame buffer
 *
 * asserts:
 *    vd is not null
 *
 * returns: error code ( 0 - E_OK)
*/
int decode_v4l2_frame(v4l2_dev_t *vd, v4l2_frame_buff_t *frame)
{
	/*asserts*/
	assert(vd != NULL);

	if(!frame->raw_frame || frame->raw_frame_size == 0)
	{
		fprintf(stderr, "V4L2_CORE: not decoding empty raw frame (frame of size %i at 0x%p)\n", (int) frame->raw_frame_size, frame->raw_frame);
		return E_DECODE_ERR;
	}

	if(verbosity > 3)
		printf("V4L2_CORE: decoding raw frame of size %i at 0x%p\n",
			(int) frame->raw_frame_size, frame->raw_frame );

	int ret = E_OK;

	int width = vd->format.fmt.pix.width;
	int height = vd->format.fmt.pix.height;

	frame->isKeyframe = 0; /*reset*/

	/*
	 * use the requested format since it may differ
	 * from format.fmt.pix.pixelformat (muxed H264)
	 */
	int format = vd->requested_fmt;

	int framesizeIn = (width * height << 1); /* 2 bytes per pixel */
	switch (format)
	{
		case V4L2_PIX_FMT_H264:
			/*
			 * get the h264 frame in the tmp_buffer
			 */
			frame->h264_frame_size = demux_h264(
				frame->h264_frame,
				frame->raw_frame,
				frame->raw_frame_size,
				frame->h264_frame_max_size);

			/*
			 * store SPS and PPS info (usually the first two NALU)
			 * and check/store the last IDR frame
			 */
			store_extra_data(vd, frame);

			/*
			 * check for keyframe and store it
			 */
			frame->isKeyframe = is_h264_keyframe(vd, frame);

			//decode only if we already have an IDR frame
			if(vd->h264_last_IDR_size > 0)
			{
				/*no need to convert output*/
				h264_decode(frame->yuv_frame, frame->h264_frame, frame->h264_frame_size);
			}
			break;

		case V4L2_PIX_FMT_JPEG:
		case V4L2_PIX_FMT_MJPEG:
			if(frame->raw_frame_size <= HEADERFRAME1)
			{
				// Prevent crash on empty image
				fprintf(stderr, "V4L2_CORE: (jpeg decoder) Ignoring empty buffer\n");
				ret = E_DECODE_ERR;
				return (ret);
			}

			ret = jpeg_decode(frame->yuv_frame, frame->raw_frame, frame->raw_frame_size);
			if(ret < 0)
			{
				fprintf(stderr, "V4L2_CORE: (jpeg decoder) decode error (%i) (res: %ix%i - %x)\n", ret, width, height, vd->format.fmt.pix.pixelformat);
				return E_DECODE_ERR;
			}

			if(verbosity > 3)
				fprintf(stderr, "V4L2_CORE: (jpeg decoder) decoded frame of size %i\n", ret);
			ret = E_OK;
			break;

		case V4L2_PIX_FMT_UYVY:
			uyvy_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_VYUY:
			vyuy_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_YVYU:
			yvyu_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_YYUV:
			yyuv_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_YUV444:
			y444_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_YUV555:
			yuvo_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_YUV565:
			yuvp_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_YUV32:
			yuv4_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_YUV420:
			if(frame->raw_frame_size > (width * height * 3/2))
				frame->raw_frame_size = width * height * 3/2;
			memcpy(frame->yuv_frame, frame->raw_frame, frame->raw_frame_size);
			break;

		case V4L2_PIX_FMT_YUV422P:
			yuv422p_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_YVU420:
			yv12_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_NV12:
			nv12_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_NV21:
			nv21_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_NV16:
			nv16_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_NV61:
			nv61_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_NV24:
			nv24_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_NV42:
			nv42_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_Y41P:
			y41p_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_GREY:
			grey_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_Y10BPACK:
			y10b_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_Y16:
			y16_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;
#ifdef V4L2_PIX_FMT_Y16_BE
		case V4L2_PIX_FMT_Y16_BE:
			y16x_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;
#endif
		case V4L2_PIX_FMT_SPCA501:
			s501_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_SPCA505:
			s505_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_SPCA508:
			s508_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_YUYV:
			if(vd->isbayer>0)
			{
				if (!(frame->tmp_buffer))
				{
					/* rgb buffer for decoding bayer data*/
					frame->tmp_buffer_max_size = width * height * 3;
					frame->tmp_buffer = calloc(frame->tmp_buffer_max_size, sizeof(uint8_t));
					if(frame->tmp_buffer == NULL)
					{
						fprintf(stderr, "V4L2_CORE: FATAL memory allocation failure (v4l2core_frame_decode): %s\n", strerror(errno));
						exit(-1);
					}
				}
				/*convert raw bayer to iyuv*/
				bayer_to_rgb24 (frame->raw_frame, frame->tmp_buffer, width, height, vd->bayer_pix_order);
				rgb24_to_yu12(frame->yuv_frame, frame->tmp_buffer, width, height);
			}
			else
				yuyv_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_SGBRG8: //0
			bayer_to_rgb24 (frame->raw_frame, frame->tmp_buffer, width, height, 0);
			rgb24_to_yu12(frame->yuv_frame, frame->tmp_buffer, width, height);
			break;

		case V4L2_PIX_FMT_SGRBG8: //1
			bayer_to_rgb24 (frame->raw_frame, frame->tmp_buffer, width, height, 1);
			rgb24_to_yu12(frame->yuv_frame, frame->tmp_buffer, width, height);
			break;

		case V4L2_PIX_FMT_SBGGR8: //2
			bayer_to_rgb24 (frame->raw_frame, frame->tmp_buffer, width, height, 2);
			rgb24_to_yu12(frame->yuv_frame, frame->tmp_buffer, width, height);
			break;
		case V4L2_PIX_FMT_SRGGB8: //3
			bayer_to_rgb24 (frame->raw_frame, frame->tmp_buffer, width, height, 3);
			rgb24_to_yu12(frame->yuv_frame, frame->tmp_buffer, width, height);
			break;

		case V4L2_PIX_FMT_RGB24:
			rgb24_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_BGR24:
			bgr24_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_RGB332:
			rgb1_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_RGB565:
			rgbp_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_RGB565X:
			rgbr_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_RGB444:
#ifdef V4L2_PIX_FMT_ARGB444
		case V4L2_PIX_FMT_ARGB444:
		case V4L2_PIX_FMT_XRGB444: //same as above but without alpha channel
#endif
			ar12_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_RGB555:
#ifdef V4L2_PIX_FMT_ARGB555
		case V4L2_PIX_FMT_ARGB555:
		case V4L2_PIX_FMT_XRGB555: //same as above but without alpha channel
#endif
			ar15_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_RGB555X:
#ifdef V4L2_PIX_FMT_ARGB555X
		case V4L2_PIX_FMT_ARGB555X:
		case V4L2_PIX_FMT_XRGB555X: //same as above but without alpha channel
#endif
			ar15x_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_BGR666:
			bgrh_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_BGR32:
#ifdef V4L2_PIX_FMT_ABGR32
		case V4L2_PIX_FMT_ABGR32:
		case V4L2_PIX_FMT_XBGR32: //same as above but without alpha channel
#endif
			ar24_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		case V4L2_PIX_FMT_RGB32:
#ifdef V4L2_PIX_FMT_ARGB32
		case V4L2_PIX_FMT_ARGB32:
		case V4L2_PIX_FMT_XRGB32: //same as above but without alpha channel
#endif
			ba24_to_yu12(frame->yuv_frame, frame->raw_frame, width, height);
			break;

		default:
			fprintf(stderr, "V4L2_CORE: error decoding frame: unknown format: %i\n", format);
			ret = E_UNKNOWN_ERR;
			break;
	}

	return ret;
}
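Most branches of the switch above funnel the raw buffer through a format-specific *_to_yu12() converter into the planar YU12/I420 buffer that the rest of the pipeline consumes. As an illustration of what such a converter does, here is a reference YUYV (packed 4:2:2) to YU12 conversion written from the pixel layout alone; it is not the project's yuyv_to_yu12() implementation, and it subsamples chroma vertically by simply keeping the even rows.

#include <stdint.h>
#include <stddef.h>

/*
 * Reference YUYV (packed 4:2:2) -> YU12/I420 (planar 4:2:0) conversion,
 * same calling convention as the *_to_yu12 helpers above.
 * Assumes width and height are even.
 */
void yuyv_to_yu12_sketch(uint8_t *out, const uint8_t *in, int width, int height)
{
	uint8_t *py = out;                               /* Y plane */
	uint8_t *pu = out + (size_t)width * height;      /* U plane */
	uint8_t *pv = pu + ((size_t)width * height) / 4; /* V plane */
	int x, y;

	for (y = 0; y < height; y++)
	{
		const uint8_t *src = in + (size_t)y * width * 2;

		for (x = 0; x < width; x += 2)
		{
			*py++ = src[0];         /* Y0 */
			*py++ = src[2];         /* Y1 */

			if ((y & 1) == 0)       /* take chroma from even rows only */
			{
				*pu++ = src[1]; /* U */
				*pv++ = src[3]; /* V */
			}
			src += 4;
		}
	}
}

Swapping src[1] and src[3] gives the YVYU layout, which is why the project keeps one small converter per packed variant.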