Code example #1 (rating: 0)
File: idct.c — Project: Jsoucek/q3ce
int IDCTBackup(idct* p,idctbackup* Backup)
{
	planes Planes;
	video LockedFormat;
	int n;

	memset(Backup,0,sizeof(idctbackup));

	/* Snapshot the IDCT's current configuration. A failed query, or an
	   empty pixel format, means there is no valid state to back up. */
	if (p->Get(p,IDCT_FORMAT,&Backup->Format,sizeof(video))!=ERR_NONE)
		return ERR_INVALID_DATA;
	if (!Backup->Format.Pixel.Flags)
		return ERR_INVALID_DATA;
	if (p->Get(p,IDCT_BUFFERWIDTH,&Backup->Width,sizeof(int))!=ERR_NONE ||
		p->Get(p,IDCT_BUFFERHEIGHT,&Backup->Height,sizeof(int))!=ERR_NONE ||
		p->Get(p,IDCT_BUFFERCOUNT,&Backup->Count,sizeof(int))!=ERR_NONE ||
		p->Get(p,IDCT_SHOW,&Backup->Show,sizeof(int))!=ERR_NONE)
		return ERR_INVALID_DATA;

	/* Copy each buffered frame out of the IDCT into a freshly allocated
	   surface so the contents survive the teardown below. */
	for (n=0;n<Backup->Count;++n)
	{
		idctbufferbackup* Buffer = &Backup->Buffer[n];

		/* -1 marks "no frame number" if the per-buffer query fails
		   (its return value is deliberately ignored). */
		Buffer->FrameNo = -1;
		p->Get(p,IDCT_FRAMENO+n,&Buffer->FrameNo,sizeof(int));

		if (p->Lock(p,n,Planes,&Buffer->Brightness,&LockedFormat) == ERR_NONE)
		{
			Buffer->Format = LockedFormat;
			DefaultPitch(&Buffer->Format);
			/* Copy is best-effort: a failed allocation simply leaves
			   this buffer's pixel data out of the backup. */
			if (SurfaceAlloc(Buffer->Buffer,&Buffer->Format) == ERR_NONE)
				SurfaceCopy(&LockedFormat,&Buffer->Format,Planes,Buffer->Buffer,NULL);
			p->Unlock(p,n);
		}
	}

	/* Release the IDCT's own format/buffers now that the state is saved. */
	p->Set(p,IDCT_FORMAT,NULL,0);
	return ERR_NONE;
}
Code example #2 (rating: 0)
File: tiff.c — Project: BigHNF/tcpmp-revive
static int UpdateInput(tiff* p)
{
	/* Reset per-stream decoder state. */
	p->ErrorShowed = 0;
	BufferClear(&p->Buffer);

	/* Only video input needs an output format; anything else is done. */
	if (p->Codec.In.Format.Type != PACKET_VIDEO)
		return ERR_NONE;

	/* Start from the input format, then force 24-bit RGB output with
	   the three 8-bit channel masks at bits 0, 8 and 16. */
	PacketFormatCopy(&p->Codec.Out.Format,&p->Codec.In.Format);
	p->Codec.Out.Format.Format.Video.Pixel.Flags = PF_RGB;
	p->Codec.Out.Format.Format.Video.Pixel.BitCount = 24;
	p->Codec.Out.Format.Format.Video.Pixel.BitMask[0] = 0x0000FF;
	p->Codec.Out.Format.Format.Video.Pixel.BitMask[1] = 0x00FF00;
	p->Codec.Out.Format.Format.Video.Pixel.BitMask[2] = 0xFF0000;
	DefaultPitch(&p->Codec.Out.Format.Format.Video);

	return ERR_NONE;
}
Code example #3 (rating: 0)
File: ffmpegv.c — Project: hhool/tcpmp-android
/* Translate the libavcodec context's pixel format, dimensions, aspect
   ratio and timing into the codec's output packet format.
   Returns 1 on success, 0 when the pixel format is unsupported. */
static bool_t BuildOutputFormat(ffmpeg_video* p)
{
	int pix_fmt = p->Context->pix_fmt;
	if (pix_fmt<0)
		pix_fmt = PIX_FMT_YUV420P; // is this needed?

	PacketFormatClear(&p->Codec.Out.Format);
	p->Codec.Out.Format.Type = PACKET_VIDEO;

	switch (pix_fmt)
	{
	case PIX_FMT_YUV410P:
		p->Codec.Out.Format.Format.Video.Pixel.Flags = PF_YUV410;
		break;
	case PIX_FMT_YUV420P:
		p->Codec.Out.Format.Format.Video.Pixel.Flags = PF_YUV420;
		break;
	case PIX_FMT_BGR24:
		DefaultRGB(&p->Codec.Out.Format.Format.Video.Pixel,24,8,8,8,0,0,0);
		break; // fixed: stray ";;" removed
	case PIX_FMT_RGB32:
		DefaultRGB(&p->Codec.Out.Format.Format.Video.Pixel,32,8,8,8,0,0,0);
		break;
	case PIX_FMT_RGB555:
		DefaultRGB(&p->Codec.Out.Format.Format.Video.Pixel,16,5,5,5,0,0,0);
		break;
	case PIX_FMT_PAL8:
		p->Codec.Out.Format.Format.Video.Pixel.Flags = PF_PALETTE;
		p->Codec.Out.Format.Format.Video.Pixel.BitCount = 8;
		// Palette is shared with the input format, not copied.
		p->Codec.Out.Format.Format.Video.Pixel.Palette = p->Codec.In.Format.Format.Video.Pixel.Palette;
		break;
	default:
		return 0; // unsupported pixel format
	}

	// Prefer the input's aspect; fall back to the stream's sample aspect
	// ratio scaled to the project's fixed-point ASPECT_ONE convention.
	p->Aspect = p->Context->sample_aspect_ratio;
	p->Codec.Out.Format.Format.Video.Aspect = p->Codec.In.Format.Format.Video.Aspect;
	if (p->Context->sample_aspect_ratio.num>0 && !p->Codec.Out.Format.Format.Video.Aspect)
		p->Codec.Out.Format.Format.Video.Aspect = Scale(ASPECT_ONE,p->Context->sample_aspect_ratio.num,p->Context->sample_aspect_ratio.den);

	// Keep input and output dimensions in sync with the decoder context.
	p->Codec.In.Format.Format.Video.Width = p->Codec.Out.Format.Format.Video.Width = p->Context->width;
	p->Codec.In.Format.Format.Video.Height = p->Codec.Out.Format.Format.Video.Height = p->Context->height;

	// Use the decoded frame's row stride when available; otherwise derive it.
	if (p->Picture->linesize[0])
		p->Codec.Out.Format.Format.Video.Pitch = p->Picture->linesize[0];
	else
		DefaultPitch(&p->Codec.Out.Format.Format.Video);

	p->PixelFormat = p->Context->pix_fmt;

	// bit_rate sanity window guards against garbage values from the stream.
	if (p->Context->bit_rate > 0 && p->Context->bit_rate < 100000000)
		p->Codec.In.Format.ByteRate = p->Context->bit_rate/8;
	if (p->Context->time_base.num > 0)
	{
		// time_base is the inverse of the frame rate (den/num fps).
		p->Codec.In.Format.PacketRate.Num = p->Context->time_base.den;
		p->Codec.In.Format.PacketRate.Den = p->Context->time_base.num;
		p->FrameTime = Scale(TICKSPERSEC,p->Codec.In.Format.PacketRate.Den,p->Codec.In.Format.PacketRate.Num);
	}
	else
		p->FrameTime = TIME_UNKNOWN;

	//ShowMessage("","%d %d %d",p->Context->pix_fmt,p->Context->width,p->Context->height);
	return 1;
}