Example #1
0
/*
 * @brief Initializes the mesh shell image.
 */
static void R_InitShellImage() {
	r_image_state.shell = R_LoadImage("envmaps/envmap_3", IT_PROGRAM);
}
Example #2
0
/*
===============
R_CombineCubeImages_f

Used to combine animations of six separate tga files into
a series of 6x-taller tga files, in preparation for roq compression
===============
*/
void R_CombineCubeImages_f( const idCmdArgs &args ) {
	if ( args.Argc() != 2 ) {
		common->Printf( "usage: combineCubeImages <baseName>\n" );
		common->Printf( " combines basename[1-6][0001-9999].tga to basenameCM[0001-9999].tga\n" );
		common->Printf( " 1: forward 2:right 3:back 4:left 5:up 6:down\n" );
		return;
	}

	idStr	baseName = args.Argv( 1 );
	common->SetRefreshOnPrint( true );

	for ( int frameNum = 1 ; frameNum < 10000 ; frameNum++ ) {
		char	filename[MAX_IMAGE_NAME];
		byte	*pics[6];
		int		width = 0, height = 0;
		int		side;
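		// remap the command-line numbering (1:forward 2:right 3:back 4:left 5:up 6:down)
		// to the order the sides are stored in the combined image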
		int		orderRemap[6] = { 1,3,4,2,5,6 };
		for ( side = 0 ; side < 6 ; side++ ) {
			sprintf( filename, "%s%i%04i.tga", baseName.c_str(), orderRemap[side], frameNum );

			common->Printf( "reading %s\n", filename );
			R_LoadImage( filename, &pics[side], &width, &height, NULL, true );

			if ( !pics[side] ) {
				common->Printf( "not found.\n" );
				break;
			}

			// convert from "camera" images to native cube map images
			switch( side ) {
			case 0:	// forward
				R_RotatePic( pics[side], width);
				break;
			case 1:	// back
				R_RotatePic( pics[side], width);
				R_HorizontalFlip( pics[side], width, height );
				R_VerticalFlip( pics[side], width, height );
				break;
			case 2:	// left
				R_VerticalFlip( pics[side], width, height );
				break;
			case 3:	// right
				R_HorizontalFlip( pics[side], width, height );
				break;
			case 4:	// up
				R_RotatePic( pics[side], width);
				break;
			case 5: // down
				R_RotatePic( pics[side], width);
				break;
			}
		}

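		// one of the six sides was missing: free what was already loaded and stop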
		if ( side != 6 ) {
			for ( int i = 0 ; i < side ; i++ ) {
				Mem_Free( pics[i] );
			}
			break;
		}

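		// stack the six converted sides vertically into a single 6x-taller image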
		idTempArray<byte> buf( width*height*6*4 );
		byte	*combined = (byte *)buf.Ptr();
		for (  side = 0 ; side < 6 ; side++ ) {
			memcpy( combined+width*height*4*side, pics[side], width*height*4 );
			Mem_Free( pics[side] );
		}
		sprintf( filename, "%sCM%04i.tga", baseName.c_str(), frameNum );

		common->Printf( "writing %s\n", filename );
		R_WriteTGA( filename, combined, width, height*6 );
	}
	common->SetRefreshOnPrint( false );
}
Example #3
0
/*
===================
R_ParseImageProgram_r

If pic is NULL, the timestamps will be filled in, but no image will be generated
If both pic and timestamps are NULL, it will just advance past it, which can be
used to parse an image program from a text stream.
===================
*/
static bool R_ParseImageProgram_r( idLexer& src, byte** pic, int* width, int* height,
								   ID_TIME_T* timestamps, textureUsage_t* usage )
{
	idToken		token;
	float		scale;
	ID_TIME_T		timestamp;
	
	src.ReadToken( &token );
	
	// Since all interaction shaders now assume YCoCG diffuse textures, we replace all entries for the intrinsic
	// _black texture with the black texture on disk.  Doing this causes a YCoCG-compliant texture to be generated.
	// Without a YCoCG-compliant black texture we would get color artifacts for any interaction
	// material that specifies the _black texture.
	if( token == "_black" )
	{
		token = "textures/black";
	}
	
	// also check for _white
	if( token == "_white" )
	{
		token = "guis/assets/white";
	}
	
	AppendToken( token );
	
	if( !token.Icmp( "heightmap" ) )
	{
		MatchAndAppendToken( src, "(" );
		
		if( !R_ParseImageProgram_r( src, pic, width, height, timestamps, usage ) )
		{
			return false;
		}
		
		MatchAndAppendToken( src, "," );
		
		src.ReadToken( &token );
		AppendToken( token );
		scale = token.GetFloatValue();
		
		// process it
		if( pic )
		{
			R_HeightmapToNormalMap( *pic, *width, *height, scale );
			if( usage )
			{
				*usage = TD_BUMP;
			}
		}
		
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "addnormals" ) )
	{
		byte*	pic2 = NULL;
		int		width2, height2;
		
		MatchAndAppendToken( src, "(" );
		
		if( !R_ParseImageProgram_r( src, pic, width, height, timestamps, usage ) )
		{
			return false;
		}
		
		MatchAndAppendToken( src, "," );
		
		if( !R_ParseImageProgram_r( src, pic ? &pic2 : NULL, &width2, &height2, timestamps, usage ) )
		{
			if( pic )
			{
				R_StaticFree( *pic );
				*pic = NULL;
			}
			return false;
		}
		
		// process it
		if( pic )
		{
			R_AddNormalMaps( *pic, *width, *height, pic2, width2, height2 );
			R_StaticFree( pic2 );
			if( usage )
			{
				*usage = TD_BUMP;
			}
		}
		
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "smoothnormals" ) )
	{
		MatchAndAppendToken( src, "(" );
		
		if( !R_ParseImageProgram_r( src, pic, width, height, timestamps, usage ) )
		{
			return false;
		}
		
		if( pic )
		{
			R_SmoothNormalMap( *pic, *width, *height );
			if( usage )
			{
				*usage = TD_BUMP;
			}
		}
		
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "add" ) )
	{
		byte*	pic2 = NULL;
		int		width2, height2;
		
		MatchAndAppendToken( src, "(" );
		
		if( !R_ParseImageProgram_r( src, pic, width, height, timestamps, usage ) )
		{
			return false;
		}
		
		MatchAndAppendToken( src, "," );
		
		if( !R_ParseImageProgram_r( src, pic ? &pic2 : NULL, &width2, &height2, timestamps, usage ) )
		{
			if( pic )
			{
				R_StaticFree( *pic );
				*pic = NULL;
			}
			return false;
		}
		
		// process it
		if( pic )
		{
			R_ImageAdd( *pic, *width, *height, pic2, width2, height2 );
			R_StaticFree( pic2 );
		}
		
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "scale" ) )
	{
		float	scale[4];
		int		i;
		
		MatchAndAppendToken( src, "(" );
		
		R_ParseImageProgram_r( src, pic, width, height, timestamps, usage );
		
		for( i = 0 ; i < 4 ; i++ )
		{
			MatchAndAppendToken( src, "," );
			src.ReadToken( &token );
			AppendToken( token );
			scale[i] = token.GetFloatValue();
		}
		
		// process it
		if( pic )
		{
			R_ImageScale( *pic, *width, *height, scale );
		}
		
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "invertAlpha" ) )
	{
		MatchAndAppendToken( src, "(" );
		
		R_ParseImageProgram_r( src, pic, width, height, timestamps, usage );
		
		// process it
		if( pic )
		{
			R_InvertAlpha( *pic, *width, *height );
		}
		
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "invertColor" ) )
	{
		MatchAndAppendToken( src, "(" );
		
		R_ParseImageProgram_r( src, pic, width, height, timestamps, usage );
		
		// process it
		if( pic )
		{
			R_InvertColor( *pic, *width, *height );
		}
		
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "makeIntensity" ) )
	{
		int		i;
		
		MatchAndAppendToken( src, "(" );
		
		R_ParseImageProgram_r( src, pic, width, height, timestamps, usage );
		
		// copy red to green, blue, and alpha
		if( pic )
		{
			int		c;
			c = *width * *height * 4;
			for( i = 0 ; i < c ; i += 4 )
			{
				( *pic )[i + 1] =
					( *pic )[i + 2] =
						( *pic )[i + 3] = ( *pic )[i];
			}
		}
		
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "makeAlpha" ) )
	{
		int		i;
		
		MatchAndAppendToken( src, "(" );
		
		R_ParseImageProgram_r( src, pic, width, height, timestamps, usage );
		
		// average RGB into alpha, then set RGB to white
		if( pic )
		{
			int		c;
			c = *width * *height * 4;
			for( i = 0 ; i < c ; i += 4 )
			{
				( *pic )[i + 3] = ( ( *pic )[i + 0] + ( *pic )[i + 1] + ( *pic )[i + 2] ) / 3;
				( *pic )[i + 0] =
					( *pic )[i + 1] =
						( *pic )[i + 2] = 255;
			}
		}
		
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	// if we are just parsing instead of loading or checking,
	// don't do the R_LoadImage
	if( !timestamps && !pic )
	{
		return true;
	}
	
	// load it as an image
	R_LoadImage( token.c_str(), pic, width, height, &timestamp, true );
	
	if( timestamp == -1 )
	{
		return false;
	}
	
	// add this to the timestamp
	if( timestamps )
	{
		if( timestamp > *timestamps )
		{
			*timestamps = timestamp;
		}
	}
	
	return true;
}
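
/*
===================
Illustrative sketch (not from the original file): one way the calling modes
described in the comment above might be driven. The wrapper name is an
assumption for illustration; only R_ParseImageProgram_r itself comes from
the code above.
===================
*/
static void R_LoadImageProgramSketch( idLexer& src, byte** pic, int* width, int* height,
									  ID_TIME_T* timestamps, textureUsage_t* usage )
{
	if( timestamps )
	{
		*timestamps = 0;	// R_ParseImageProgram_r only ever raises this to the newest source file time
	}

	// pic != NULL                      -> generate the image in *pic
	// pic == NULL, timestamps != NULL  -> only gather source file timestamps
	// both NULL                        -> just advance the lexer past the program text
	R_ParseImageProgram_r( src, pic, width, height, timestamps, usage );
}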
Example #4
0
/*
===============
GLimp_SetMode
===============
*/
static int GLimp_SetMode(int mode, qboolean fullscreen, qboolean noborder)
{
	const char *glstring;
	int perChannelColorBits;
	int colorBits, depthBits, stencilBits;
	int samples;
	int i = 0;
	SDL_Surface *icon = NULL;
	Uint32 flags = SDL_WINDOW_SHOWN | SDL_WINDOW_OPENGL;
	SDL_DisplayMode desktopMode;
	int display = 0;
	int x = SDL_WINDOWPOS_UNDEFINED, y = SDL_WINDOWPOS_UNDEFINED;
	char windowTitle[128];
	textureLevel_t *iconPic;
	byte *pixelData;
	int bytesPerPixel, width, height;

	ri.Printf( PRINT_ALL, "Initializing OpenGL display\n");

	ri.Cvar_VariableStringBuffer("com_productName", windowTitle, sizeof (windowTitle));

	if ( r_allowResize->integer )
		flags |= SDL_WINDOW_RESIZABLE;

	iconPic = NULL;
	pixelData = NULL;
	bytesPerPixel = 4;

#ifdef USE_ICON
	// try to load 32 x 32 icon
	if ( r_forceWindowIcon32->integer ) {
		int numLevels;

		R_LoadImage( "windowicon32", &numLevels, &iconPic );

		if ( iconPic && ( iconPic[0].width != 32 || iconPic[0].height != 32 ) ) {
			ri.Free( iconPic );
			iconPic = NULL;
			ri.Printf( PRINT_WARNING, "Ignoring windowicon32: Image must be 32 x 32!\n");
		}
	} else {
		int numLevels;

		// try to load high resolution icon
		R_LoadImage( "windowicon", &numLevels, &iconPic );
	}

	if ( iconPic ) {
		pixelData = iconPic[0].data;
		width = iconPic[0].width;
		height = iconPic[0].height;
	} else {
		// fallback to default icon
		pixelData = (byte *)CLIENT_WINDOW_ICON.pixel_data;
		bytesPerPixel = CLIENT_WINDOW_ICON.bytes_per_pixel;
		width = CLIENT_WINDOW_ICON.width;
		height = CLIENT_WINDOW_ICON.height;
	}

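	// wrap the raw icon pixels in an SDL surface (channel masks depend on endianness)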
	icon = SDL_CreateRGBSurfaceFrom(
			(void *)pixelData,
			width,
			height,
			bytesPerPixel * 8,
			bytesPerPixel * width,
#ifdef Q3_LITTLE_ENDIAN
			0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
#else
			0xFF000000, 0x00FF0000, 0x0000FF00, 0x000000FF
#endif
			);
#endif

	// If a window exists, note its display index
	if( SDL_window != NULL )
	{
		display = SDL_GetWindowDisplayIndex( SDL_window );
		if( display < 0 )
		{
			ri.Printf( PRINT_DEVELOPER, "SDL_GetWindowDisplayIndex() failed: %s\n", SDL_GetError() );
		}
	}

	if( display >= 0 && SDL_GetDesktopDisplayMode( display, &desktopMode ) == 0 )
	{
		glConfig.displayWidth = desktopMode.w;
		glConfig.displayHeight = desktopMode.h;
		glConfig.displayAspect = (float)desktopMode.w / (float)desktopMode.h;

		ri.Printf( PRINT_ALL, "Display aspect: %.3f\n", glConfig.displayAspect );
	}
	else
	{
		Com_Memset( &desktopMode, 0, sizeof( SDL_DisplayMode ) );

		glConfig.displayWidth = 640;
		glConfig.displayHeight = 480;
		glConfig.displayAspect = 1.333f;

		ri.Printf( PRINT_ALL,
				"Cannot determine display aspect, assuming 1.333\n" );
	}

	ri.Printf (PRINT_ALL, "...setting mode %d:", mode );

	if (mode == -2)
	{
		// use desktop video resolution
		if( desktopMode.h > 0 )
		{
			glConfig.vidWidth = desktopMode.w;
			glConfig.vidHeight = desktopMode.h;
		}
		else
		{
			glConfig.vidWidth = 640;
			glConfig.vidHeight = 480;
			ri.Printf( PRINT_ALL,
					"Cannot determine display resolution, assuming 640x480\n" );
		}

		glConfig.windowAspect = (float)glConfig.vidWidth / (float)glConfig.vidHeight;
	}
	else if ( !R_GetModeInfo( &glConfig.vidWidth, &glConfig.vidHeight, &glConfig.windowAspect, mode ) )
	{
		ri.Printf( PRINT_ALL, " invalid mode\n" );
		return RSERR_INVALID_MODE;
	}
	ri.Printf( PRINT_ALL, " %d %d\n", glConfig.vidWidth, glConfig.vidHeight);

	// Center window
	if( r_centerWindow->integer && !fullscreen )
	{
		x = ( desktopMode.w / 2 ) - ( glConfig.vidWidth / 2 );
		y = ( desktopMode.h / 2 ) - ( glConfig.vidHeight / 2 );
	}

	// Destroy existing state if it exists
	if( SDL_glContext != NULL )
	{
		SDL_GL_DeleteContext( SDL_glContext );
		SDL_glContext = NULL;
	}

	if( SDL_window != NULL )
	{
		SDL_GetWindowPosition( SDL_window, &x, &y );
		ri.Printf( PRINT_DEVELOPER, "Existing window at %dx%d before being destroyed\n", x, y );
		SDL_DestroyWindow( SDL_window );
		SDL_window = NULL;
	}

	if( fullscreen )
	{
		flags |= SDL_WINDOW_FULLSCREEN;
		glConfig.isFullscreen = qtrue;
	}
	else
	{
		if( noborder )
			flags |= SDL_WINDOW_BORDERLESS;

		glConfig.isFullscreen = qfalse;
	}

	colorBits = r_colorbits->value;
	if ((!colorBits) || (colorBits >= 32))
		colorBits = 24;

	if (!r_depthbits->value)
		depthBits = 24;
	else
		depthBits = r_depthbits->value;

	stencilBits = r_stencilbits->value;
	samples = r_ext_multisample->value;

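	// try up to 16 attribute combinations, progressively reducing color, depth and stencil
	// precision until a window and GL context can be created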
	for (i = 0; i < 16; i++)
	{
		int testColorBits, testDepthBits, testStencilBits;

		// 0 - default
		// 1 - minus colorBits
		// 2 - minus depthBits
		// 3 - minus stencil
		if ((i % 4) == 0 && i)
		{
			// one pass, reduce
			switch (i / 4)
			{
				case 2 :
					if (colorBits == 24)
						colorBits = 16;
					break;
				case 1 :
					if (depthBits == 24)
						depthBits = 16;
					else if (depthBits == 16)
						depthBits = 8;
					break;
				case 3 :
					if (stencilBits == 24)
						stencilBits = 16;
					else if (stencilBits == 16)
						stencilBits = 8;
					break;
			}
		}

		testColorBits = colorBits;
		testDepthBits = depthBits;
		testStencilBits = stencilBits;

		if ((i % 4) == 3)
		{ // reduce colorBits
			if (testColorBits == 24)
				testColorBits = 16;
		}

		if ((i % 4) == 2)
		{ // reduce depthBits
			if (testDepthBits == 24)
				testDepthBits = 16;
			else if (testDepthBits == 16)
				testDepthBits = 8;
		}

		if ((i % 4) == 1)
		{ // reduce stencilBits
			if (testStencilBits == 24)
				testStencilBits = 16;
			else if (testStencilBits == 16)
				testStencilBits = 8;
			else
				testStencilBits = 0;
		}

		if (testColorBits == 24)
			perChannelColorBits = 8;
		else
			perChannelColorBits = 4;

#ifdef __sgi /* Fix for SGIs grabbing too many bits of color */
		if (perChannelColorBits == 4)
			perChannelColorBits = 0; /* Use minimum size for 16-bit color */

		/* Need alpha or else SGIs choose 36+ bit RGB mode */
		SDL_GL_SetAttribute( SDL_GL_ALPHA_SIZE, 1);
#endif

		SDL_GL_SetAttribute( SDL_GL_RED_SIZE, perChannelColorBits );
		SDL_GL_SetAttribute( SDL_GL_GREEN_SIZE, perChannelColorBits );
		SDL_GL_SetAttribute( SDL_GL_BLUE_SIZE, perChannelColorBits );
		SDL_GL_SetAttribute( SDL_GL_DEPTH_SIZE, testDepthBits );
		SDL_GL_SetAttribute( SDL_GL_STENCIL_SIZE, testStencilBits );

		SDL_GL_SetAttribute( SDL_GL_MULTISAMPLEBUFFERS, samples ? 1 : 0 );
		SDL_GL_SetAttribute( SDL_GL_MULTISAMPLESAMPLES, samples );

		if(r_stereoEnabled->integer)
		{
			glConfig.stereoEnabled = qtrue;
			SDL_GL_SetAttribute(SDL_GL_STEREO, 1);
		}
		else
		{
			glConfig.stereoEnabled = qfalse;
			SDL_GL_SetAttribute(SDL_GL_STEREO, 0);
		}
		
		SDL_GL_SetAttribute( SDL_GL_DOUBLEBUFFER, 1 );

#if 0 // if multisampling is enabled on X11, this causes create window to fail.
		// If not allowing software GL, demand accelerated
		if( !r_allowSoftwareGL->integer )
			SDL_GL_SetAttribute( SDL_GL_ACCELERATED_VISUAL, 1 );
#endif

		if( ( SDL_window = SDL_CreateWindow( windowTitle, x, y,
				glConfig.vidWidth, glConfig.vidHeight, flags ) ) == NULL )
		{
			ri.Printf( PRINT_DEVELOPER, "SDL_CreateWindow failed: %s\n", SDL_GetError( ) );
			continue;
		}

		if( fullscreen )
		{
			SDL_DisplayMode mode;

			switch( testColorBits )
			{
				case 16: mode.format = SDL_PIXELFORMAT_RGB565; break;
				case 24: mode.format = SDL_PIXELFORMAT_RGB24;  break;
				default: ri.Printf( PRINT_DEVELOPER, "testColorBits is %d, can't fullscreen\n", testColorBits ); continue;
			}

			mode.w = glConfig.vidWidth;
			mode.h = glConfig.vidHeight;
			mode.refresh_rate = glConfig.displayFrequency = ri.Cvar_VariableIntegerValue( "r_displayRefresh" );
			mode.driverdata = NULL;

			if( SDL_SetWindowDisplayMode( SDL_window, &mode ) < 0 )
			{
				ri.Printf( PRINT_DEVELOPER, "SDL_SetWindowDisplayMode failed: %s\n", SDL_GetError( ) );
				continue;
			}
		}

		SDL_SetWindowIcon( SDL_window, icon );

		if( ( SDL_glContext = SDL_GL_CreateContext( SDL_window ) ) == NULL )
		{
			ri.Printf( PRINT_DEVELOPER, "SDL_GL_CreateContext failed: %s\n", SDL_GetError( ) );
			continue;
		}

		qglClearColor( 0, 0, 0, 1 );
		qglClear( GL_COLOR_BUFFER_BIT );
		SDL_GL_SwapWindow( SDL_window );

		if( SDL_GL_SetSwapInterval( r_swapInterval->integer ) == -1 )
		{
			ri.Printf( PRINT_DEVELOPER, "SDL_GL_SetSwapInterval failed: %s\n", SDL_GetError( ) );
		}

		glConfig.colorBits = testColorBits;
		glConfig.depthBits = testDepthBits;
		glConfig.stencilBits = testStencilBits;

		ri.Printf( PRINT_ALL, "Using %d color bits, %d depth, %d stencil display.\n",
				glConfig.colorBits, glConfig.depthBits, glConfig.stencilBits );
		break;
	}

	SDL_FreeSurface( icon );

	if ( iconPic ) {
		ri.Free( iconPic );
		iconPic = NULL;
	}

	if( !SDL_window )
	{
		ri.Printf( PRINT_ALL, "Couldn't get a visual\n" );
		return RSERR_INVALID_MODE;
	}

	GLimp_DetectAvailableModes();

	glstring = (char *) qglGetString (GL_RENDERER);
	ri.Printf( PRINT_ALL, "GL_RENDERER: %s\n", glstring );

	return RSERR_OK;
}
Example #5
0
/*
* R_ResampleCinematicFrame
*/
static image_t *R_ResampleCinematicFrame( r_cinhandle_t *handle )
{
	const int samples = 4;

	if( !handle->pic ) {
		// we haven't yet read a new frame, return whatever image we got
		// this will return NULL until at least one frame has been read
		return handle->image;
	}

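	// YUV cinematics are converted to RGB by rendering into a framebuffer object;
	// RGB cinematics are uploaded (or replaced) directly in the else branch below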
	if( handle->yuv ) {
		int i;

		if( !handle->yuv_images[0] ) {
			qbyte *fake_data[1] = { NULL };
			const char *letters[3] = { "y", "u", "v" };

			for( i = 0; i < 3; i++ ) {
				handle->yuv_images[i] = R_LoadImage( va( "%s_%s", handle->name, letters[i] ), 
					fake_data, 1, 1, IT_CINEMATIC|IT_LUMINANCE, 1 );
			}
			handle->new_frame = qtrue;
		}
		
		if( handle->new_frame ) {
			int fbo;
			qboolean in2D;
			
			// render/convert three 8-bit YUV images into RGB framebuffer

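			// remember the current 2D mode and bound FBO so they can be restored afterwards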
			in2D = rf.in2D;
			fbo = RFB_BoundObject();

			if( !in2D ) {
				R_PushRefInst();
			}

			R_InitViewportTexture( &handle->image, handle->name, 0, 
				handle->cyuv->image_width, handle->cyuv->image_height, 
				0, IT_CINEMATIC|IT_FRAMEBUFFER, samples );

			R_BindFrameBufferObject( handle->image->fbo );

			R_Set2DMode( qtrue );

			RB_Scissor( 0, 0, handle->image->upload_width, handle->image->upload_height );

			RB_Viewport( 0, 0, handle->image->upload_width, handle->image->upload_height );

			// flip the image vertically because we're rendering to a FBO
			R_DrawStretchRawYUVBuiltin( 
				0, 0, 
				handle->image->upload_width, handle->image->upload_height,
				(float)handle->cyuv->x_offset / handle->cyuv->image_width, 
				(float)handle->cyuv->y_offset / handle->cyuv->image_height, 
				(float)(handle->cyuv->x_offset + handle->cyuv->width) / handle->cyuv->image_width, 
				(float)(handle->cyuv->y_offset + handle->cyuv->height) / handle->cyuv->image_height, 
				handle->cyuv->yuv, handle->yuv_images, 2 );

			if( !in2D ) {
				R_PopRefInst( 0 );
			}
			R_BindFrameBufferObject( fbo );

			R_Set2DMode( in2D );

			handle->new_frame = qfalse;
		}
	}
	else {
		if( !handle->image ) {
			handle->image = R_LoadImage( handle->name, &handle->pic, handle->width, handle->height, 
				IT_CINEMATIC, samples );
			handle->new_frame = qfalse;
		} else if( handle->new_frame ) {
			R_ReplaceImage( handle->image, &handle->pic, handle->width, handle->height, 
				handle->image->flags, samples );
			handle->new_frame = qfalse;
		}
	}

	return handle->image;
}
Example #6
0
/*
* R_UploadCinematicFrame
*/
static void R_UploadCinematicFrame( r_cinhandle_t *handle ) {
	const int samples = 4;

	ri.Mutex_Lock( handle->lock );

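	// nothing to upload until the cinematic has decoded at least one frame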
	if( !handle->cin || !handle->pic ) {
		ri.Mutex_Unlock( handle->lock );
		return;
	}

	if( handle->yuv ) {
		int i;

		if( !handle->yuv_images[0] ) {
			char tn[256];
			uint8_t *fake_data[1] = { NULL };
			const char *letters[3] = { "y", "u", "v" };

			for( i = 0; i < 3; i++ ) {
				handle->yuv_images[i] = R_LoadImage(
					va_r( tn, sizeof( tn ), "%s_%s", handle->name, letters[i] ),
					fake_data, 1, 1, IT_SPECIAL | IT_NO_DATA_SYNC, 1, IMAGE_TAG_GENERIC, 1 );
			}
			handle->new_frame = true;
		}

		if( handle->new_frame ) {
			bool multiSamples2D;
			bool in2D;

			// render/convert three 8-bit YUV images into RGB framebuffer
			in2D = rf.twoD.enabled;
			multiSamples2D = rf.twoD.multiSamples;

			if( in2D ) {
				R_End2D();
			} else {
				R_PushRefInst();
			}

			R_InitViewportTexture( &handle->image, handle->name, 0,
								   handle->cyuv->image_width, handle->cyuv->image_height,
								   0, IT_SPECIAL | IT_FRAMEBUFFER, IMAGE_TAG_GENERIC, samples );

			R_BindFrameBufferObject( handle->image->fbo );

			R_SetupGL2D();

			RB_Scissor( 0, 0, handle->image->upload_width, handle->image->upload_height );

			RB_Viewport( 0, 0, handle->image->upload_width, handle->image->upload_height );

			R_UploadRawYUVPic( handle->yuv_images, handle->cyuv->yuv );

			// flip the image vertically because we're rendering to a FBO
			R_DrawStretchRawYUVBuiltin(
				0, 0,
				handle->image->upload_width, handle->image->upload_height,
				(float)handle->cyuv->x_offset / handle->cyuv->image_width,
				(float)handle->cyuv->y_offset / handle->cyuv->image_height,
				(float)( handle->cyuv->x_offset + handle->cyuv->width ) / handle->cyuv->image_width,
				(float)( handle->cyuv->y_offset + handle->cyuv->height ) / handle->cyuv->image_height,
				handle->yuv_images, 2 );

			if( in2D ) {
				R_Begin2D( multiSamples2D );
			} else {
				R_PopRefInst();
			}

			handle->new_frame = false;
		}
	} else {
		if( !handle->image ) {
			handle->image = R_LoadImage( handle->name, (uint8_t **)&handle->pic, handle->width, handle->height,
										 IT_SPECIAL | IT_NO_DATA_SYNC, 1, IMAGE_TAG_GENERIC, samples );
		}

		if( handle->new_frame ) {
			R_ReplaceImage( handle->image, (uint8_t **)&handle->pic, handle->width, handle->height,
							handle->image->flags, 1, samples );
			handle->new_frame = false;
		}
	}

	ri.Mutex_Unlock( handle->lock );
}
Example #7
0
/*
===================
R_ParseImageProgram_r

If pic is NULL, the timestamps will be filled in, but no image will be generated
If both pic and timestamps are NULL, it will just advance past it, which can be
used to parse an image program from a text stream.
===================
*/
static bool R_ParseImageProgram_r( idLexer &src, byte **pic, int *width, int *height, ID_TIME_T *timestamps, textureDepth_t *depth )
{
	idToken		token;
	float		scale;
	ID_TIME_T	timestamp;
	
	src.ReadToken( &token );
	AppendToken( token );
	
	if( !token.Icmp( "heightmap" ) )
	{
		MatchAndAppendToken( src, "(" );
		
		if( !R_ParseImageProgram_r( src, pic, width, height, timestamps, depth ) )
		{
			return false;
		}
		MatchAndAppendToken( src, "," );
		
		src.ReadToken( &token );
		AppendToken( token );
		scale = token.GetFloatValue();
		
		// process it
		if( pic )
		{
			R_HeightmapToNormalMap( *pic, *width, *height, scale );
			
			if( depth )
			{
				*depth = TD_BUMP;
			}
		}
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "addnormals" ) )
	{
		byte	*pic2;
		int		width2, height2;
		
		MatchAndAppendToken( src, "(" );
		
		if( !R_ParseImageProgram_r( src, pic, width, height, timestamps, depth ) )
		{
			return false;
		}
		MatchAndAppendToken( src, "," );
		
		if( !R_ParseImageProgram_r( src, pic ? &pic2 : NULL, &width2, &height2, timestamps, depth ) )
		{
			if( pic )
			{
				R_StaticFree( *pic );
				*pic = NULL;
			}
			
			return false;
		}
		
		// process it
		if( pic )
		{
			R_AddNormalMaps( *pic, *width, *height, pic2, width2, height2 );
			R_StaticFree( pic2 );
			
			if( depth )
			{
				*depth = TD_BUMP;
			}
		}
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "smoothnormals" ) )
	{
		MatchAndAppendToken( src, "(" );
		
		if( !R_ParseImageProgram_r( src, pic, width, height, timestamps, depth ) )
		{
			return false;
		}
		
		if( pic )
		{
			R_SmoothNormalMap( *pic, *width, *height );
			
			if( depth )
			{
				*depth = TD_BUMP;
			}
		}
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "add" ) )
	{
		byte	*pic2;
		int		width2, height2;
		
		MatchAndAppendToken( src, "(" );
		
		if( !R_ParseImageProgram_r( src, pic, width, height, timestamps, depth ) )
		{
			return false;
		}
		MatchAndAppendToken( src, "," );
		
		if( !R_ParseImageProgram_r( src, pic ? &pic2 : NULL, &width2, &height2, timestamps, depth ) )
		{
			if( pic )
			{
				R_StaticFree( *pic );
				*pic = NULL;
			}
			
			return false;
		}
		
		// process it
		if( pic )
		{
			R_ImageAdd( *pic, *width, *height, pic2, width2, height2 );
			R_StaticFree( pic2 );
		}
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "scale" ) )
	{
		float	vertscale[4];
		
		MatchAndAppendToken( src, "(" );
		
		R_ParseImageProgram_r( src, pic, width, height, timestamps, depth );
		
		for( int i = 0; i < 4; i++ )
		{
			MatchAndAppendToken( src, "," );
			src.ReadToken( &token );
			AppendToken( token );
			vertscale[i] = token.GetFloatValue();
		}
		
		// process it
		if( pic )
		{
			R_ImageScale( *pic, *width, *height, vertscale );
		}
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "invertAlpha" ) )
	{
		MatchAndAppendToken( src, "(" );
		
		R_ParseImageProgram_r( src, pic, width, height, timestamps, depth );
		
		// process it
		if( pic )
		{
			R_InvertAlpha( *pic, *width, *height );
		}
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "invertColor" ) )
	{
		MatchAndAppendToken( src, "(" );
		
		R_ParseImageProgram_r( src, pic, width, height, timestamps, depth );
		
		// process it
		if( pic )
		{
			R_InvertColor( *pic, *width, *height );
		}
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "makeIntensity" ) )
	{
		int		i;
		
		MatchAndAppendToken( src, "(" );
		
		R_ParseImageProgram_r( src, pic, width, height, timestamps, depth );
		
		// copy red to green, blue, and alpha
		if( pic )
		{
			int		c;
			c = *width * *height * 4;
			
			for( i = 0; i < c; i += 4 )
			{
				( *pic ) [i + 1] =
					( *pic ) [i + 2] =
						( *pic ) [i + 3] = ( *pic ) [i];
			}
		}
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	if( !token.Icmp( "makeAlpha" ) )
	{
		int		i;
		
		MatchAndAppendToken( src, "(" );
		
		R_ParseImageProgram_r( src, pic, width, height, timestamps, depth );
		
		// average RGB into alpha, then set RGB to white
		if( pic )
		{
			int		c;
			c = *width * *height * 4;
			
			for( i = 0; i < c; i += 4 )
			{
				( *pic ) [i + 3] = ( ( *pic ) [i + 0] + ( *pic ) [i + 1] + ( *pic ) [i + 2] ) / 3;
				( *pic ) [i + 0] =
					( *pic ) [i + 1] =
						( *pic ) [i + 2] = 255;
			}
		}
		MatchAndAppendToken( src, ")" );
		return true;
	}
	
	// if we are just parsing instead of loading or checking,
	// don't do the R_LoadImage
	if( !timestamps && !pic )
	{
		return true;
	}
	
	// load it as an image
	R_LoadImage( token.c_str(), pic, width, height, &timestamp, true );
	
	if( timestamp == -1 )
	{
		return false;
	}
	
	// add this to the timestamp
	if( timestamps )
	{
		if( timestamp > *timestamps )
		{
			*timestamps = timestamp;
		}
	}
	return true;
}
CRainSystem::CRainSystem(int maxRain) :
	mMaxRain(maxRain),
	mNextWindGust(0),
	mRainHeight(5),
	mAlpha(0.15f),
	mWindAngle(1.0f),

	mFadeAlpha(0.0f),
	mIsRaining(false)

{
	char			name[256];
	unsigned char	*data, *pos;
	int				width, height;
	int				x, y;

	mSpread[0] = (float)(M_PI*2.0);		// angle spread
	mSpread[1] = 20.0f;			// radius spread
	mSpread[2] = 20.0f;			// z spread

	mMinVelocity[0] = 0.1f;
	mMaxVelocity[0] = -0.1f;
	mMinVelocity[1] = 0.1f;
	mMaxVelocity[1] = -0.1f;
	mMinVelocity[2] = -60.0;
	mMaxVelocity[2] = -50.0;

	mWindDuration = 15;
	mWindLow = 50;
	mWindMin = 0.01f;
	mWindMax = 0.05f;

	mWindChange = 0;
	mWindDirection[0] = mWindDirection[1] = mWindDirection[2] = 0.0;

	mRainList = new SParticle[mMaxRain];

	sprintf(name, "gfx/world/rain.tga");
	R_LoadImage( name, &data, &width, &height );
	if (!data)
	{
		ri.Error (ERR_DROP, "Could not load %s", name);
	}

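	// copy the red channel into alpha so the texture's brightness becomes its opacity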
	pos = data;
	for(y=0;y<height;y++)
	{
		for(x=0;x<width;x++)
		{
			pos[3] = pos[0];
			pos += 4;
		}
	}

	mImage = R_CreateImage(name, data, width, height, false, false, false, GL_CLAMP);
	GL_Bind(mImage);
	qglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
	qglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );

	Init();
}