/* Create a directory named 'path' if it does not already exist.
 *
 * Returns: 0 if path already exists as a directory or if created.
 *          -1 in all other cases.
 */
static int
mkdir_if_needed(const char *path)
{
   struct stat sb;

   /* If the path exists already, then our work is done if it's a
    * directory, but it's an error if it is not.
    */
   if (stat(path, &sb) == 0) {
      if (S_ISDIR(sb.st_mode)) {
         return 0;
      } else {
         _mesa_warning(NULL,
                       "Cannot use %s for shader cache (not a directory)"
                       "---disabling.\n", path);
         return -1;
      }
   }

   int ret = mkdir(path, 0755);
   if (ret == 0)
      return 0;

   /* Another process may have created the path between our stat() and
    * mkdir() (TOCTOU race).  EEXIST only counts as success if the entry
    * that now exists really is a directory; the original code accepted
    * any EEXIST, including a plain file.
    */
   if (ret == -1 && errno == EEXIST) {
      if (stat(path, &sb) == 0 && S_ISDIR(sb.st_mode))
         return 0;
      _mesa_warning(NULL,
                    "Cannot use %s for shader cache (not a directory)"
                    "---disabling.\n", path);
      return -1;
   }

   _mesa_warning(NULL,
                 "Failed to create %s for shader cache (%s)---disabling.\n",
                 path, strerror(errno));
   return -1;
}
/* Create a __DRIimage view of one plane of a planar (e.g. YUV) parent
 * image.  The new image shares the parent's buffer object; it does not
 * copy pixel data.
 *
 * Returns NULL if the parent is not planar, the plane index is out of
 * range, the plane would lie outside the parent's BO, or on OOM.
 */
static __DRIimage *
intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
{
   int width, height, offset, stride, dri_format, index;
   struct intel_image_format *f;
   uint32_t mask_x, mask_y;
   __DRIimage *image;

   if (parent == NULL || parent->planar_format == NULL)
      return NULL;

   f = parent->planar_format;

   if (plane >= f->nplanes)
      return NULL;

   /* Chroma planes may be subsampled relative to the parent, hence the
    * per-plane shifts.
    */
   width = parent->region->width >> f->planes[plane].width_shift;
   height = parent->region->height >> f->planes[plane].height_shift;
   dri_format = f->planes[plane].dri_format;
   index = f->planes[plane].buffer_index;
   offset = parent->offsets[index];
   stride = parent->strides[index];

   image = intel_allocate_image(dri_format, loaderPrivate);
   if (image == NULL)
      return NULL;

   /* Reject planes that would extend past the end of the parent's BO. */
   if (offset + height * stride > parent->region->bo->size) {
      _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
      free(image);
      return NULL;
   }

   image->region = calloc(sizeof(*image->region), 1);
   if (image->region == NULL) {
      free(image);
      return NULL;
   }

   image->region->cpp = _mesa_get_format_bytes(image->format);
   image->region->width = width;
   image->region->height = height;
   image->region->pitch = stride;
   image->region->refcount = 1;
   /* Share (and reference) the parent's buffer object. */
   image->region->bo = parent->region->bo;
   drm_intel_bo_reference(image->region->bo);
   image->region->tiling = parent->region->tiling;
   image->offset = offset;
   intel_setup_image_from_dimensions(image);

   /* Warn (but do not fail) if the plane offset is not tile-aligned. */
   intel_region_get_tile_masks(image->region, &mask_x, &mask_y, false);
   if (offset & mask_x)
      _mesa_warning(NULL,
                    "intel_create_sub_image: offset not on tile boundary");

   return image;
}
/* Initialize S3TC (DXTn) texture compression support.  Called during
 * context initialization.  When built with USE_EXTERNAL_DXTN_LIB, this
 * tries to dlopen the external DXTn library and resolve its fetch and
 * compress entry points; ctx->Mesa_DXTn is set to GL_TRUE only if every
 * symbol resolved.
 */
void
_mesa_init_texture_s3tc( struct gl_context *ctx )
{
   /* called during context initialization */
   ctx->Mesa_DXTn = GL_FALSE;
#if USE_EXTERNAL_DXTN_LIB
   if (!dxtlibhandle) {
      dxtlibhandle = _mesa_dlopen(DXTN_LIBNAME, 0);
      if (!dxtlibhandle) {
         _mesa_warning(ctx, "couldn't open " DXTN_LIBNAME ", software DXTn "
            "compression/decompression unavailable");
      }
      else {
         /* the fetch functions are not per context! Might be problematic... */
         fetch_ext_rgb_dxt1 = (dxtFetchTexelFuncExt)
            _mesa_dlsym(dxtlibhandle, "fetch_2d_texel_rgb_dxt1");
         fetch_ext_rgba_dxt1 = (dxtFetchTexelFuncExt)
            _mesa_dlsym(dxtlibhandle, "fetch_2d_texel_rgba_dxt1");
         fetch_ext_rgba_dxt3 = (dxtFetchTexelFuncExt)
            _mesa_dlsym(dxtlibhandle, "fetch_2d_texel_rgba_dxt3");
         fetch_ext_rgba_dxt5 = (dxtFetchTexelFuncExt)
            _mesa_dlsym(dxtlibhandle, "fetch_2d_texel_rgba_dxt5");
         ext_tx_compress_dxtn = (dxtCompressTexFuncExt)
            _mesa_dlsym(dxtlibhandle, "tx_compress_dxtn");
         /* All-or-nothing: if any symbol is missing, reset every pointer
          * and unload the library so later code sees a consistent state.
          */
         if (!fetch_ext_rgb_dxt1 ||
             !fetch_ext_rgba_dxt1 ||
             !fetch_ext_rgba_dxt3 ||
             !fetch_ext_rgba_dxt5 ||
             !ext_tx_compress_dxtn) {
            _mesa_warning(ctx, "couldn't reference all symbols in "
               DXTN_LIBNAME ", software DXTn compression/decompression "
               "unavailable");
            fetch_ext_rgb_dxt1 = NULL;
            fetch_ext_rgba_dxt1 = NULL;
            fetch_ext_rgba_dxt3 = NULL;
            fetch_ext_rgba_dxt5 = NULL;
            ext_tx_compress_dxtn = NULL;
            _mesa_dlclose(dxtlibhandle);
            dxtlibhandle = NULL;
         }
      }
   }
   if (dxtlibhandle) {
      ctx->Mesa_DXTn = GL_TRUE;
   }
#else
   (void) ctx;
#endif
}
/**
 * Do bounds checking on array element indexes.  Check that the vertices
 * pointed to by the indices don't lie outside buffer object bounds.
 * \return GL_TRUE if OK, GL_FALSE if any indexed vertex goes is out of bounds
 */
static GLboolean
check_index_bounds(struct gl_context *ctx, GLsizei count, GLenum type,
                   const GLvoid *indices, GLint basevertex)
{
   struct _mesa_prim prim;
   struct _mesa_index_buffer ib;
   GLuint min, max;

   /* Only the X Server needs to do this -- otherwise, accessing outside
    * array/BO bounds allows application termination.
    */
   if (!ctx->Const.CheckArrayBounds)
      return GL_TRUE;

   /* Build a minimal prim/index-buffer description just for the
    * min/max index scan below.
    */
   memset(&prim, 0, sizeof(prim));
   prim.count = count;

   memset(&ib, 0, sizeof(ib));
   ib.type = type;
   ib.ptr = indices;
   ib.obj = ctx->Array.ArrayObj->ElementArrayBufferObj;

   vbo_get_minmax_indices(ctx, &prim, &ib, &min, &max, 1);

   /* The (int) cast catches min + basevertex wrapping negative. */
   if ((int)(min + basevertex) < 0 ||
       max + basevertex >= ctx->Array.ArrayObj->_MaxElement) {
      /* the max element is out of bounds of one or more enabled arrays */
      _mesa_warning(ctx, "glDrawElements() index=%u is out of bounds (max=%u)",
                    max, ctx->Array.ArrayObj->_MaxElement);
      return GL_FALSE;
   }
   return GL_TRUE;
}
/**
 * Initialize the remap table.  This is called in one_time_init().
 * The remap table needs to be initialized before calling the
 * CALL/GET/SET macros defined in main/dispatch.h.
 */
void
_mesa_init_remap_table(void)
{
   static bool initialized = false;
   GLint idx;

   if (initialized)
      return;
   initialized = true;

   /* Resolve every entry of the MESA_remap_table_functions table into a
    * dispatch offset.
    */
   for (idx = 0; idx < driDispatchRemapTable_size; idx++) {
      const char *spec;

      /* Entries must appear in remap-index order. */
      assert(MESA_remap_table_functions[idx].remap_index == idx);

      spec = _mesa_function_pool + MESA_remap_table_functions[idx].pool_index;

      /* Store the resolved dispatch offset (negative on failure). */
      driDispatchRemapTable[idx] = map_function_spec(spec);

      if (driDispatchRemapTable[idx] < 0) {
         /* The function name is stored right after the spec string. */
         const char *fname = spec + strlen(spec) + 1;
         _mesa_warning(NULL, "failed to remap %s", fname);
      }
   }
}
/** * Initialize the remap table. This is called in one_time_init(). * The remap table needs to be initialized before calling the * CALL/GET/SET macros defined in main/dispatch.h. */ static void _mesa_do_init_remap_table(const char *pool, int size, const struct gl_function_pool_remap *remap) { static GLboolean initialized = GL_FALSE; GLint i; if (initialized) return; initialized = GL_TRUE; /* initialize the remap table */ for (i = 0; i < size; i++) { GLint offset; const char *spec; /* sanity check */ assert(i == remap[i].remap_index); spec = _mesa_function_pool + remap[i].pool_index; offset = _mesa_map_function_spec(spec); /* store the dispatch offset in the remap table */ driDispatchRemapTable[i] = offset; if (offset < 0) { const char *name = spec + strlen(spec) + 1; _mesa_warning(NULL, "failed to remap %s", name); } } }
/**
 * Map an array of functions.  This is a convenient function for
 * use with arrays available from including remap_helper.h.
 *
 * Note that the dispatch offsets of the functions are not returned.
 * If they are needed, _mesa_map_function_spec() should be used.
 *
 * \param func_array an array of function remaps.
 */
void
_mesa_map_function_array(const struct gl_function_remap *func_array)
{
   GLint idx;

   if (!func_array)
      return;

   /* The array is terminated by a func_index of -1. */
   for (idx = 0; func_array[idx].func_index != -1; idx++) {
      const char *spec = _mesa_get_function_spec(func_array[idx].func_index);
      GLint mapped;

      if (!spec) {
         _mesa_problem(NULL, "invalid function index %d",
                       func_array[idx].func_index);
         continue;
      }

      mapped = _mesa_map_function_spec(spec);

      if (mapped < 0) {
         /* remap failed entirely */
         const char *fname = spec + strlen(spec) + 1;
         _mesa_warning(NULL, "failed to remap %s", fname);
      }
      else if (func_array[idx].dispatch_offset >= 0 &&
               mapped != func_array[idx].dispatch_offset) {
         /* remapped, but to a different slot than the table expected */
         const char *fname = spec + strlen(spec) + 1;
         _mesa_problem(NULL, "%s should be mapped to %d, not %d",
                       fname, func_array[idx].dispatch_offset, mapped);
      }
   }
}
/* Error checking for glDrawElementsInstanced(): parameter validation
 * plus VBO bounds checking.
 * \return GL_TRUE if OK to render, GL_FALSE if an error was found
 *         (GL errors are raised here; a plain GL_FALSE without an error
 *         means "nothing to draw", e.g. count == 0).
 */
GLboolean
_mesa_validate_DrawElementsInstanced(struct gl_context *ctx,
                                     GLenum mode, GLsizei count, GLenum type,
                                     const GLvoid *indices, GLsizei numInstances,
                                     GLint basevertex)
{
   ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, GL_FALSE);

   /* count == 0 is not an error, just nothing to do */
   if (count <= 0) {
      if (count < 0)
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "glDrawElementsInstanced(count=%d)", count);
      return GL_FALSE;
   }

   if (mode > GL_TRIANGLE_STRIP_ADJACENCY_ARB) {
      _mesa_error(ctx, GL_INVALID_ENUM,
                  "glDrawElementsInstanced(mode = 0x%x)", mode);
      return GL_FALSE;
   }

   /* only ubyte/ushort/uint index types are legal */
   if (type != GL_UNSIGNED_INT &&
       type != GL_UNSIGNED_BYTE &&
       type != GL_UNSIGNED_SHORT) {
      _mesa_error(ctx, GL_INVALID_ENUM,
                  "glDrawElementsInstanced(type=0x%x)", type);
      return GL_FALSE;
   }

   /* numInstances == 0 is likewise a silent no-op */
   if (numInstances <= 0) {
      if (numInstances < 0)
         _mesa_error(ctx, GL_INVALID_VALUE,
                     "glDrawElementsInstanced(numInstances=%d)", numInstances);
      return GL_FALSE;
   }

   if (!check_valid_to_render(ctx, "glDrawElementsInstanced"))
      return GL_FALSE;

   /* Vertex buffer object tests */
   if (_mesa_is_bufferobj(ctx->Array.ElementArrayBufferObj)) {
      /* use indices in the buffer object */
      /* make sure count doesn't go outside buffer bounds */
      if (index_bytes(type, count) > ctx->Array.ElementArrayBufferObj->Size) {
         _mesa_warning(ctx,
                       "glDrawElementsInstanced index out of buffer bounds");
         return GL_FALSE;
      }
   }
   else {
      /* not using a VBO */
      if (!indices)
         return GL_FALSE;
   }

   if (!check_index_bounds(ctx, count, type, indices, basevertex))
      return GL_FALSE;

   return GL_TRUE;
}
/** * Prior to drawing, check that any uniforms referenced by the * current shader have been set. If a uniform has not been set, * issue a warning. */ static void check_uniforms(struct gl_context *ctx) { struct gl_shader_program *shProg[3] = { ctx->Shader.CurrentVertexProgram, ctx->Shader.CurrentGeometryProgram, ctx->Shader.CurrentFragmentProgram, }; unsigned j; for (j = 0; j < 3; j++) { unsigned i; if (shProg[j] == NULL || !shProg[j]->LinkStatus) continue; for (i = 0; i < shProg[j]->NumUserUniformStorage; i++) { const struct gl_uniform_storage *u = &shProg[j]->UniformStorage[i]; if (!u->initialized) { _mesa_warning(ctx, "Using shader with uninitialized uniform: %s", u->name); } } } }
/**
 * Error checking for glMultiDrawElements().  Includes parameter checking
 * and VBO bounds checking.
 * \return GL_TRUE if OK to render, GL_FALSE if error found
 */
GLboolean
_mesa_validate_MultiDrawElements(struct gl_context *ctx,
                                 GLenum mode, const GLsizei *count,
                                 GLenum type, const GLvoid * const *indices,
                                 GLuint primcount, const GLint *basevertex)
{
   unsigned i;

   FLUSH_CURRENT(ctx, 0);

   /* Any negative count is an error; a zero count makes the whole call
    * a no-op (GL_FALSE without raising an error).
    */
   for (i = 0; i < primcount; i++) {
      if (count[i] <= 0) {
         if (count[i] < 0)
            _mesa_error(ctx, GL_INVALID_VALUE,
                        "glMultiDrawElements(count)" );
         return GL_FALSE;
      }
   }

   if (!_mesa_valid_prim_mode(ctx, mode, "glMultiDrawElements")) {
      return GL_FALSE;
   }

   if (!valid_elements_type(ctx, type, "glMultiDrawElements"))
      return GL_FALSE;

   if (!check_valid_to_render(ctx, "glMultiDrawElements"))
      return GL_FALSE;

   /* Vertex buffer object tests */
   if (_mesa_is_bufferobj(ctx->Array.ArrayObj->ElementArrayBufferObj)) {
      /* use indices in the buffer object */
      /* make sure count doesn't go outside buffer bounds */
      for (i = 0; i < primcount; i++) {
         if (index_bytes(type, count[i]) >
             ctx->Array.ArrayObj->ElementArrayBufferObj->Size) {
            _mesa_warning(ctx,
                          "glMultiDrawElements index out of buffer bounds");
            return GL_FALSE;
         }
      }
   }
   else {
      /* not using a VBO: every index pointer must be non-NULL */
      for (i = 0; i < primcount; i++) {
         if (!indices[i])
            return GL_FALSE;
      }
   }

   /* basevertex may be NULL, meaning basevertex 0 for every prim */
   for (i = 0; i < primcount; i++) {
      if (!check_index_bounds(ctx, count[i], type, indices[i],
                              basevertex ? basevertex[i] : 0))
         return GL_FALSE;
   }

   return GL_TRUE;
}
/**
 * Error checking for glDrawElements().  Includes parameter checking
 * and VBO bounds checking.
 * \return GL_TRUE if OK to render, GL_FALSE if error found
 */
GLboolean
_mesa_validate_DrawElements(struct gl_context *ctx,
                            GLenum mode, GLsizei count, GLenum type,
                            const GLvoid *indices, GLint basevertex)
{
   FLUSH_CURRENT(ctx, 0);

   /* From the GLES3 specification, section 2.14.2 (Transform Feedback
    * Primitive Capture):
    *
    *   The error INVALID_OPERATION is also generated by DrawElements,
    *   DrawElementsInstanced, and DrawRangeElements while transform feedback
    *   is active and not paused, regardless of mode.
    */
   if (_mesa_is_gles3(ctx) && _mesa_is_xfb_active_and_unpaused(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glDrawElements(transform feedback active)");
      return GL_FALSE;
   }

   /* count == 0 is a silent no-op; count < 0 raises GL_INVALID_VALUE */
   if (count <= 0) {
      if (count < 0)
         _mesa_error(ctx, GL_INVALID_VALUE, "glDrawElements(count)" );
      return GL_FALSE;
   }

   if (!_mesa_valid_prim_mode(ctx, mode, "glDrawElements")) {
      return GL_FALSE;
   }

   if (!valid_elements_type(ctx, type, "glDrawElements"))
      return GL_FALSE;

   if (!check_valid_to_render(ctx, "glDrawElements"))
      return GL_FALSE;

   /* Vertex buffer object tests */
   if (_mesa_is_bufferobj(ctx->Array.ArrayObj->ElementArrayBufferObj)) {
      /* use indices in the buffer object */
      /* make sure count doesn't go outside buffer bounds */
      if (index_bytes(type, count) >
          ctx->Array.ArrayObj->ElementArrayBufferObj->Size) {
         _mesa_warning(ctx, "glDrawElements index out of buffer bounds");
         return GL_FALSE;
      }
   }
   else {
      /* not using a VBO */
      if (!indices)
         return GL_FALSE;
   }

   if (!check_index_bounds(ctx, count, type, indices, basevertex))
      return GL_FALSE;

   return GL_TRUE;
}
/**
 * Null texel fetch function.
 *
 * Have to have this so the FetchTexel function pointer is never NULL.
 * Returns transparent black and emits a warning, since being called at
 * all indicates a missing real fetch function.
 */
static void
fetch_null_texelf(const struct swrast_texture_image *texImage,
                  GLint i, GLint j, GLint k, GLfloat *texel)
{
   /* all parameters are deliberately unused */
   (void) texImage;
   (void) i;
   (void) j;
   (void) k;

   texel[RCOMP] = 0.0;
   texel[GCOMP] = 0.0;
   texel[BCOMP] = 0.0;
   texel[ACOMP] = 0.0;

   _mesa_warning(NULL, "fetch_null_texelf() called!");
}
/**
 * Private function for creating an XMesaBuffer which corresponds to an
 * X window or pixmap.
 * \param v  the window's XMesaVisual
 * \param w  the window we're wrapping
 * \return new XMesaBuffer or NULL if error
 */
PUBLIC XMesaBuffer
XMesaCreateWindowBuffer(XMesaVisual v, Window w)
{
   XWindowAttributes attr;
   XMesaBuffer b;
   Colormap cmap;
   int depth;

   assert(v);
   assert(w);

   /* Check that window depth matches visual depth */
   XGetWindowAttributes( v->display, w, &attr );
   depth = attr.depth;
   if (v->visinfo->depth != depth) {
      _mesa_warning(NULL, "XMesaCreateWindowBuffer: depth mismatch between visual (%d) and window (%d)!\n",
                    v->visinfo->depth, depth);
      return NULL;
   }

   /* Find colormap */
   if (attr.colormap) {
      cmap = attr.colormap;
   }
   else {
      _mesa_warning(NULL, "Window %u has no colormap!\n", (unsigned int) w);
      /* this is weird, a window w/out a colormap!? */
      /* OK, let's just allocate a new one and hope for the best */
      cmap = XCreateColormap(v->display, w, attr.visual, AllocNone);
   }

   b = create_xmesa_buffer((Drawable) w, WINDOW, v, cmap);
   if (!b)
      return NULL;

   /* finish setup; on failure, tear the buffer back down */
   if (!initialize_visual_and_buffer( v, b, v->mesa_visual.rgbMode,
                                      (Drawable) w, cmap )) {
      xmesa_free_buffer(b);
      return NULL;
   }

   return b;
}
/**
 * Clear the accumulation buffer by mapping the renderbuffer and
 * writing the clear color to it.  Called by the driver's implementation
 * of the glClear function.
 */
void
_mesa_clear_accum_buffer(struct gl_context *ctx)
{
   GLuint x, y, width, height;
   GLubyte *accMap;
   GLint accRowStride;
   struct gl_renderbuffer *accRb;

   if (!ctx->DrawBuffer)
      return;

   accRb = ctx->DrawBuffer->Attachment[BUFFER_ACCUM].Renderbuffer;
   if (!accRb)
      return;   /* missing accum buffer, not an error */

   /* bounds, with scissor */
   x = ctx->DrawBuffer->_Xmin;
   y = ctx->DrawBuffer->_Ymin;
   width = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
   height = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;

   ctx->Driver.MapRenderbuffer(ctx, accRb, x, y, width, height,
                               GL_MAP_WRITE_BIT, &accMap, &accRowStride);

   if (!accMap) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glAccum");
      return;
   }

   /* Only the signed 16-bit RGBA accum format is handled here. */
   if (accRb->Format == MESA_FORMAT_SIGNED_RGBA_16) {
      const GLshort clearR = FLOAT_TO_SHORT(ctx->Accum.ClearColor[0]);
      const GLshort clearG = FLOAT_TO_SHORT(ctx->Accum.ClearColor[1]);
      const GLshort clearB = FLOAT_TO_SHORT(ctx->Accum.ClearColor[2]);
      const GLshort clearA = FLOAT_TO_SHORT(ctx->Accum.ClearColor[3]);
      GLuint i, j;

      /* fill the mapped region row by row (stride is in bytes) */
      for (j = 0; j < height; j++) {
         GLshort *row = (GLshort *) accMap;
         for (i = 0; i < width; i++) {
            row[i * 4 + 0] = clearR;
            row[i * 4 + 1] = clearG;
            row[i * 4 + 2] = clearB;
            row[i * 4 + 3] = clearA;
         }
         accMap += accRowStride;
      }
   }
   else {
      /* other types someday? */
      _mesa_warning(ctx, "unexpected accum buffer type");
   }

   ctx->Driver.UnmapRenderbuffer(ctx, accRb);
}
/* Warn when a tiled image's offset is not tile-aligned (low 12 bits
 * non-zero).  Untiled images may start at any offset, so they are
 * exempt.  'func' names the caller for the warning message.
 */
static void
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
{
   uint32_t tiling;
   uint32_t swizzle;

   drm_intel_bo_get_tiling(image->bo, &tiling, &swizzle);

   if (tiling == I915_TILING_NONE)
      return;

   if (image->offset & 0xfff) {
      _mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
                    func, image->offset);
   }
}
/**
 * Error checking for glDrawRangeElements().  Includes parameter checking
 * and VBO bounds checking.
 * \return GL_TRUE if OK to render, GL_FALSE if error found
 */
GLboolean
_mesa_validate_DrawRangeElements(GLcontext *ctx, GLenum mode,
                                 GLuint start, GLuint end,
                                 GLsizei count, GLenum type,
                                 const GLvoid *indices, GLint basevertex)
{
   ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, GL_FALSE);

   /* count == 0 is a silent no-op; count < 0 raises an error */
   if (count <= 0) {
      if (count < 0)
         _mesa_error(ctx, GL_INVALID_VALUE, "glDrawRangeElements(count)" );
      return GL_FALSE;
   }

   if (mode > GL_POLYGON) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glDrawRangeElements(mode)" );
      return GL_FALSE;
   }

   /* the [start, end] index range must be well-formed */
   if (end < start) {
      _mesa_error(ctx, GL_INVALID_VALUE, "glDrawRangeElements(end<start)");
      return GL_FALSE;
   }

   if (type != GL_UNSIGNED_INT &&
       type != GL_UNSIGNED_BYTE &&
       type != GL_UNSIGNED_SHORT) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glDrawRangeElements(type)" );
      return GL_FALSE;
   }

   if (!check_valid_to_render(ctx, "glDrawRangeElements"))
      return GL_FALSE;

   /* Vertex buffer object tests */
   if (_mesa_is_bufferobj(ctx->Array.ElementArrayBufferObj)) {
      /* use indices in the buffer object */
      /* make sure count doesn't go outside buffer bounds */
      if (index_bytes(type, count) > ctx->Array.ElementArrayBufferObj->Size) {
         _mesa_warning(ctx, "glDrawRangeElements index out of buffer bounds");
         return GL_FALSE;
      }
   }
   else {
      /* not using a VBO */
      if (!indices)
         return GL_FALSE;
   }

   if (!check_index_bounds(ctx, count, type, indices, basevertex))
      return GL_FALSE;

   return GL_TRUE;
}
/**
 * Store user's image in rgb_dxt1 format.
 *
 * Converts the source image to RGB/GLchan if needed, then compresses it
 * via the external DXTn library.  If that library was not loaded the
 * destination is left untouched and a warning is emitted (still returns
 * GL_TRUE).  Returns GL_FALSE only on out-of-memory.
 */
GLboolean
_mesa_texstore_rgb_dxt1(TEXSTORE_PARAMS)
{
   const GLchan *pixels;
   GLubyte *dst;
   /* dstRowStride is in bytes per block row; 8 bytes per 4x4 DXT1
    * block, hence this width reconstruction.  "a bit of a hack".
    */
   const GLint texWidth = dstRowStride * 4 / 8;
   const GLchan *tempImage = NULL;

   ASSERT(dstFormat == MESA_FORMAT_RGB_DXT1 ||
          dstFormat == MESA_FORMAT_SRGB_DXT1);
   /* compressed blocks are 4x4; offsets must be block-aligned */
   ASSERT(dstXoffset % 4 == 0);
   ASSERT(dstYoffset % 4 == 0);
   ASSERT(dstZoffset % 4 == 0);
   (void) dstZoffset;
   (void) dstImageOffsets;

   if (srcFormat != GL_RGB ||
       srcType != CHAN_TYPE ||
       ctx->_ImageTransferState ||
       srcPacking->SwapBytes) {
      /* convert image to RGB/GLchan */
      tempImage = _mesa_make_temp_chan_image(ctx, dims,
                                             baseInternalFormat,
                                             _mesa_get_format_base_format(dstFormat),
                                             srcWidth, srcHeight, srcDepth,
                                             srcFormat, srcType, srcAddr,
                                             srcPacking);
      if (!tempImage)
         return GL_FALSE; /* out of memory */
      pixels = tempImage;
      srcFormat = GL_RGB;
   }
   else {
      /* source already matches; compress in place from user memory */
      pixels = (const GLchan *) srcAddr;
   }

   dst = _mesa_compressed_image_address(dstXoffset, dstYoffset, 0,
                                        dstFormat,
                                        texWidth, (GLubyte *) dstAddr);

   if (ext_tx_compress_dxtn) {
      (*ext_tx_compress_dxtn)(3, srcWidth, srcHeight, pixels,
                              GL_COMPRESSED_RGB_S3TC_DXT1_EXT,
                              dst, dstRowStride);
   }
   else {
      _mesa_warning(ctx, "external dxt library not available: texstore_rgb_dxt1");
   }

   if (tempImage)
      free((void *) tempImage);

   return GL_TRUE;
}
/* Create a __DRIimage view of a sub-rectangle of the parent image.  The
 * sub-image shares (and references) the parent's buffer object; no pixel
 * data is copied.
 *
 * Returns NULL on allocation failure or if the sub-image would extend
 * past the parent's buffer object.
 */
static __DRIimage *
intel_create_sub_image(__DRIimage *parent,
                       int width, int height, int dri_format,
                       int offset, int pitch, void *loaderPrivate)
{
   __DRIimage *image;
   int cpp;
   uint32_t mask_x, mask_y;

   image = intel_allocate_image(dri_format, loaderPrivate);
   /* Bug fix: the original dereferenced image->format before checking
    * whether the allocation succeeded, crashing on OOM.  The sibling
    * intel_from_planar() performs this check.
    */
   if (image == NULL)
      return NULL;

   cpp = _mesa_get_format_bytes(image->format);
   /* Reject sub-images that would run past the end of the parent's BO. */
   if (offset + height * cpp * pitch > parent->region->bo->size) {
      _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
      FREE(image);
      return NULL;
   }

   image->region = calloc(sizeof(*image->region), 1);
   if (image->region == NULL) {
      FREE(image);
      return NULL;
   }

   image->region->cpp = cpp;
   image->region->width = width;
   image->region->height = height;
   image->region->pitch = pitch;
   image->region->refcount = 1;
   /* Share and reference the parent's buffer object. */
   image->region->bo = parent->region->bo;
   drm_intel_bo_reference(image->region->bo);
   image->region->tiling = parent->region->tiling;
   image->region->screen = parent->region->screen;
   image->offset = offset;

   /* Warn (but do not fail) if the offset is not tile-aligned. */
   intel_region_get_tile_masks(image->region, &mask_x, &mask_y);
   if (offset & mask_x)
      _mesa_warning(NULL,
                    "intel_create_sub_image: offset not on tile boundary");

   return image;
}
/**
 * Software fallback for glAccum.  Dispatches to the per-op accumulation
 * helpers over the current (scissored) drawbuffer region.
 */
void
_swrast_Accum(GLcontext *ctx, GLenum op, GLfloat value)
{
   SWcontext *swrast = SWRAST_CONTEXT(ctx);
   GLint xpos, ypos, width, height;

   if (SWRAST_CONTEXT(ctx)->NewState)
      _swrast_validate_derived( ctx );

   /* glAccum without an accum buffer is a no-op with a warning */
   if (!ctx->DrawBuffer->Attachment[BUFFER_ACCUM].Renderbuffer) {
      _mesa_warning(ctx, "Calling glAccum() without an accumulation buffer");
      return;
   }

   RENDER_START(swrast, ctx);

   /* Compute region after calling RENDER_START so that we know the
    * drawbuffer's size/bounds are up to date.
    */
   xpos = ctx->DrawBuffer->_Xmin;
   ypos = ctx->DrawBuffer->_Ymin;
   width = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
   height = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;

   switch (op) {
   case GL_ADD:
      /* adding zero is a no-op */
      if (value != 0.0F) {
         accum_add(ctx, value, xpos, ypos, width, height);
      }
      break;
   case GL_MULT:
      /* multiplying by one is a no-op */
      if (value != 1.0F) {
         accum_mult(ctx, value, xpos, ypos, width, height);
      }
      break;
   case GL_ACCUM:
      if (value != 0.0F) {
         accum_accum(ctx, value, xpos, ypos, width, height);
      }
      break;
   case GL_LOAD:
      accum_load(ctx, value, xpos, ypos, width, height);
      break;
   case GL_RETURN:
      accum_return(ctx, value, xpos, ypos, width, height);
      break;
   default:
      /* enum validation happens in the API layer; reaching here is a bug */
      _mesa_problem(ctx, "invalid mode in _swrast_Accum()");
      break;
   }

   RENDER_FINISH(swrast, ctx);
}
/**
 * Store user's image in rgba_dxt1 format.
 *
 * Converts the source to packed RGBA/ubyte if it is not already in that
 * layout, then compresses via the external DXTn library.  If the library
 * is unavailable, a warning is emitted and the destination is left
 * untouched (still returns GL_TRUE).  Returns GL_FALSE only on OOM.
 */
GLboolean
_mesa_texstore_rgba_dxt1(TEXSTORE_PARAMS)
{
   const GLubyte *pixels;
   GLubyte *dst;
   const GLubyte *tempImage = NULL;

   assert(dstFormat == MESA_FORMAT_RGBA_DXT1 ||
          dstFormat == MESA_FORMAT_SRGBA_DXT1);

   if (srcFormat != GL_RGBA ||
       srcType != GL_UNSIGNED_BYTE ||
       ctx->_ImageTransferState ||
       ALIGN(srcPacking->RowLength, srcPacking->Alignment) != srcWidth ||
       srcPacking->SwapBytes) {
      /* convert image to RGBA/GLubyte */
      GLubyte *tempImageSlices[1];
      int rgbaRowStride = 4 * srcWidth * sizeof(GLubyte);
      tempImage = malloc(srcWidth * srcHeight * 4 * sizeof(GLubyte));
      if (!tempImage)
         return GL_FALSE; /* out of memory */
      tempImageSlices[0] = (GLubyte *) tempImage;
      /* pick byte order so memory layout is R,G,B,A regardless of endianness */
      _mesa_texstore(ctx, dims,
                     baseInternalFormat,
                     _mesa_little_endian() ? MESA_FORMAT_R8G8B8A8_UNORM
                                           : MESA_FORMAT_A8B8G8R8_UNORM,
                     rgbaRowStride, tempImageSlices,
                     srcWidth, srcHeight, srcDepth,
                     srcFormat, srcType, srcAddr,
                     srcPacking);
      pixels = tempImage;
      srcFormat = GL_RGBA;
   }
   else {
      /* source is already tightly-packed RGBA/ubyte; compress directly */
      pixels = _mesa_image_address2d(srcPacking, srcAddr, srcWidth, srcHeight,
                                     srcFormat, srcType, 0, 0);
   }

   dst = dstSlices[0];

   if (ext_tx_compress_dxtn) {
      (*ext_tx_compress_dxtn)(4, srcWidth, srcHeight, pixels,
                              GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,
                              dst, dstRowStride);
   }
   else {
      _mesa_warning(ctx,
                    "external dxt library not available: texstore_rgba_dxt1");
   }

   /* free(NULL) is a no-op, so this is safe on the direct path */
   free((void*) tempImage);

   return GL_TRUE;
}
/**
 * Compile one arithmetic operation COLOR&ALPHA pair into TGSI instructions.
 *
 * optype 0 is the color op (writes XYZ or the explicit dst mask),
 * optype 1 is the alpha op (writes W only).
 */
static void
compile_instruction(struct st_translate *t,
                    const struct atifs_instruction *inst)
{
   unsigned optype;

   for (optype = 0; optype < 2; optype++) { /* color, alpha */
      const struct instruction_desc *desc;
      struct ureg_dst dst[1];
      struct ureg_src args[3]; /* arguments for the main operation */
      unsigned arg;
      unsigned dstreg = inst->DstReg[optype].Index - GL_REG_0_ATI;

      /* 0 means no instruction for this half of the pair */
      if (!inst->Opcode[optype])
         continue;

      desc = &inst_desc[inst->Opcode[optype] - GL_MOV_ATI];

      /* prepare the arguments */
      for (arg = 0; arg < desc->arg_count; arg++) {
         if (arg >= inst->ArgCount[optype]) {
            /* pad missing arguments with a 0.0 immediate */
            _mesa_warning(0, "Using 0 for missing argument %d of %s\n",
                          arg, desc->name);
            args[arg] = ureg_imm1f(t->ureg, 0.0f);
         }
         else {
            args[arg] = prepare_argument(t, arg,
                                         &inst->SrcReg[optype][arg]);
         }
      }

      /* prepare dst */
      dst[0] = get_temp(t, dstreg);

      if (optype) {
         /* alpha op only ever writes W */
         dst[0] = ureg_writemask(dst[0], TGSI_WRITEMASK_W);
      }
      else {
         GLuint dstMask = inst->DstReg[optype].dstMask;

         if (dstMask == GL_NONE) {
            dst[0] = ureg_writemask(dst[0], TGSI_WRITEMASK_XYZ);
         }
         else {
            dst[0] = ureg_writemask(dst[0], dstMask); /* the enum values match */
         }
      }

      /* emit the main instruction, then apply the destination modifier */
      emit_arith_inst(t, desc, dst, args, arg);
      emit_dstmod(t, *dst, inst->DstReg[optype].dstMod);
      t->regs_written[t->current_pass][dstreg] = true;
   }
}
/**
 * Ensure all enabled and complete textures are uploaded along with any
 * buffers being used.  Registers color/depth renderbuffers and every
 * active texture's BO with the command-stream space checker.
 * \return GL_FALSE if the BOs don't fit in the available memory domains.
 */
GLboolean r300ValidateBuffers(GLcontext * ctx)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct radeon_renderbuffer *rrb;
	int i;
	int ret;

	/* start a fresh space-accounting pass */
	radeon_cs_space_reset_bos(rmesa->radeon.cmdbuf.cs);

	rrb = radeon_get_colorbuffer(&rmesa->radeon);
	/* color buffer */
	if (rrb && rrb->bo) {
		radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
						  rrb->bo, 0,
						  RADEON_GEM_DOMAIN_VRAM);
	}

	/* depth buffer */
	rrb = radeon_get_depthbuffer(&rmesa->radeon);
	if (rrb && rrb->bo) {
		radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
						  rrb->bo, 0,
						  RADEON_GEM_DOMAIN_VRAM);
	}

	for (i = 0; i < ctx->Const.MaxTextureImageUnits; ++i) {
		radeonTexObj *t;

		if (!ctx->Texture.Unit[i]._ReallyEnabled)
			continue;

		/* validation failure only warns; the texture BO is still
		 * registered below */
		if (!r300_validate_texture(ctx, ctx->Texture.Unit[i]._Current)) {
			_mesa_warning(ctx,
				      "failed to validate texture for unit %d.\n",
				      i);
		}
		t = radeon_tex_obj(ctx->Texture.Unit[i]._Current);
		if (t->image_override && t->bo)
			radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
							  t->bo,
							  RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
		/* NOTE(review): this assumes t->mt is non-NULL when there is
		 * no image override -- confirm r300_validate_texture
		 * guarantees a miptree even on failure.
		 */
		else if (t->mt->bo)
			radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
							  t->mt->bo,
							  RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
	}

	ret = radeon_cs_space_check_with_bo(rmesa->radeon.cmdbuf.cs,
					    first_elem(&rmesa->radeon.dma.reserved)->bo,
					    RADEON_GEM_DOMAIN_GTT, 0);
	if (ret)
		return GL_FALSE;
	return GL_TRUE;
}
/** * When a context is bound for the first time, we can finally finish * initializing the context's visual and buffer information. * \param v the XMesaVisual to initialize * \param b the XMesaBuffer to initialize (may be NULL) * \param rgb_flag TRUE = RGBA mode, FALSE = color index mode * \param window the window/pixmap we're rendering into * \param cmap the colormap associated with the window/pixmap * \return GL_TRUE=success, GL_FALSE=failure */ static GLboolean initialize_visual_and_buffer(XMesaVisual v, XMesaBuffer b, GLboolean rgb_flag, Drawable window, Colormap cmap) { ASSERT(!b || b->xm_visual == v); /* Save true bits/pixel */ v->BitsPerPixel = bits_per_pixel(v); assert(v->BitsPerPixel > 0); if (rgb_flag == GL_FALSE) { /* COLOR-INDEXED WINDOW: not supported*/ return GL_FALSE; } else { /* RGB WINDOW: * We support RGB rendering into almost any kind of visual. */ const int xclass = v->visualType; if (xclass != GLX_TRUE_COLOR && xclass == !GLX_DIRECT_COLOR) { _mesa_warning(NULL, "XMesa: RGB mode rendering not supported in given visual.\n"); return GL_FALSE; } v->mesa_visual.indexBits = 0; if (v->BitsPerPixel == 32) { /* We use XImages for all front/back buffers. If an X Window or * X Pixmap is 32bpp, there's no guarantee that the alpha channel * will be preserved. For XImages we're in luck. */ v->mesa_visual.alphaBits = 8; } } /* * If MESA_INFO env var is set print out some debugging info * which can help Brian figure out what's going on when a user * reports bugs. */ if (_mesa_getenv("MESA_INFO")) { printf("X/Mesa visual = %p\n", (void *) v); printf("X/Mesa level = %d\n", v->mesa_visual.level); printf("X/Mesa depth = %d\n", v->visinfo->depth); printf("X/Mesa bits per pixel = %d\n", v->BitsPerPixel); } return GL_TRUE; }
/**
 * Store user's image in rgba_dxt1 format.
 *
 * Converts the source to RGBA/GLubyte if necessary, then compresses via
 * the external DXTn library.  If the library is unavailable, a warning
 * is emitted and the destination is left untouched (still returns
 * GL_TRUE).  Returns GL_FALSE only on out-of-memory.
 */
GLboolean
_mesa_texstore_rgba_dxt1(TEXSTORE_PARAMS)
{
   const GLubyte *pixels;
   GLubyte *dst;
   const GLubyte *tempImage = NULL;

   ASSERT(dstFormat == MESA_FORMAT_RGBA_DXT1 ||
          dstFormat == MESA_FORMAT_SRGBA_DXT1);

   if (srcFormat != GL_RGBA ||
       srcType != GL_UNSIGNED_BYTE ||
       ctx->_ImageTransferState ||
       srcPacking->RowLength != srcWidth ||
       srcPacking->SwapBytes) {
      /* convert image to RGBA/GLubyte */
      tempImage = _mesa_make_temp_ubyte_image(ctx, dims,
                                              baseInternalFormat,
                                              _mesa_get_format_base_format(dstFormat),
                                              srcWidth, srcHeight, srcDepth,
                                              srcFormat, srcType, srcAddr,
                                              srcPacking);
      if (!tempImage)
         return GL_FALSE; /* out of memory */
      pixels = tempImage;
      srcFormat = GL_RGBA;
   }
   else {
      /* source already matches; compress directly from user memory */
      pixels = _mesa_image_address2d(srcPacking, srcAddr, srcWidth, srcHeight,
                                     srcFormat, srcType, 0, 0);
   }

   dst = dstSlices[0];

   if (ext_tx_compress_dxtn) {
      (*ext_tx_compress_dxtn)(4, srcWidth, srcHeight, pixels,
                              GL_COMPRESSED_RGBA_S3TC_DXT1_EXT,
                              dst, dstRowStride);
   }
   else {
      _mesa_warning(ctx,
                    "external dxt library not available: texstore_rgba_dxt1");
   }

   /* free(NULL) is a no-op, so this is safe on the direct path */
   free((void*) tempImage);

   return GL_TRUE;
}
/**
 * Return the size of the window (or pixmap) that corresponds to the
 * given XMesaBuffer.  On failure (e.g. the drawable was destroyed),
 * falls back to 1x1 after a warning.
 * \param width  returns width in pixels
 * \param height  returns height in pixels
 */
void
xmesa_get_window_size(Display *dpy, XMesaBuffer b,
                      GLuint *width, GLuint *height)
{
   XMesaDisplay xmdpy = xmesa_init_display(dpy);
   Status stat;

   /* the display-wide mutex serializes Xlib access */
   pipe_mutex_lock(xmdpy->mutex);
   stat = get_drawable_size(dpy, b->ws.drawable, width, height);
   pipe_mutex_unlock(xmdpy->mutex);

   if (!stat) {
      /* probably querying a window that's recently been destroyed */
      _mesa_warning(NULL, "XGetGeometry failed!\n");
      *width = *height = 1;
   }
}
/**
 * Used for allocating front/back renderbuffers for an X window.
 *
 * \param ctx  GL context (used only for warnings)
 * \param name  renderbuffer name; these buffers are driver-allocated,
 *              so they are always registered with id 0 (see below)
 * \param xmvis  the XMesa visual that determines the pixel format
 * \param backBuffer  GL_TRUE for the back buffer, GL_FALSE for front
 * \return new renderbuffer wrapper, or NULL on allocation failure
 */
struct xmesa_renderbuffer *
xmesa_new_renderbuffer(struct gl_context *ctx, GLuint name,
                       const struct xmesa_visual *xmvis,
                       GLboolean backBuffer)
{
   struct xmesa_renderbuffer *xrb = CALLOC_STRUCT(xmesa_renderbuffer);
   if (xrb) {
      /* Bug fix: the original declared a local 'GLuint name = 0' that
       * shadowed the 'name' parameter.  Behavior is preserved -- these
       * window-system renderbuffers are always initialized with id 0 --
       * but the shadowing is removed; the parameter is kept for
       * interface compatibility.
       */
      (void) name;
      _mesa_init_renderbuffer(&xrb->Base.Base, 0);

      xrb->Base.Base.Delete = xmesa_delete_renderbuffer;
      if (backBuffer)
         xrb->Base.Base.AllocStorage = xmesa_alloc_back_storage;
      else
         xrb->Base.Base.AllocStorage = xmesa_alloc_front_storage;

      xrb->Base.Base.InternalFormat = GL_RGBA;
      xrb->Base.Base._BaseFormat = GL_RGBA;
      xrb->Base.Base.ClassID = XMESA_RENDERBUFFER;

      /* map the XMesa pixel format onto a Mesa surface format */
      switch (xmvis->undithered_pf) {
      case PF_8R8G8B:
         /* This will really only happen for pixmaps.  We'll access the
          * pixmap via a temporary XImage which will be 32bpp.
          */
         xrb->Base.Base.Format = MESA_FORMAT_XRGB8888;
         break;
      case PF_8A8R8G8B:
         xrb->Base.Base.Format = MESA_FORMAT_ARGB8888;
         break;
      case PF_8A8B8G8R:
         xrb->Base.Base.Format = MESA_FORMAT_RGBA8888_REV;
         break;
      case PF_5R6G5B:
         xrb->Base.Base.Format = MESA_FORMAT_RGB565;
         break;
      default:
         /* unexpected format: warn and fall back to ARGB8888 */
         _mesa_warning(ctx, "Bad pixel format in xmesa_new_renderbuffer");
         xrb->Base.Base.Format = MESA_FORMAT_ARGB8888;
         break;
      }

      /* only need to set Red/Green/EtcBits fields for user-created RBs */
   }
   return xrb;
}
/**
 * Return the size of the window (or pixmap) that corresponds to the
 * given XMesaBuffer.  On failure (e.g. the drawable was destroyed),
 * falls back to 1x1 after a warning.
 * \param width  returns width in pixels
 * \param height  returns height in pixels
 */
void
xmesa_get_window_size(XMesaDisplay *dpy, XMesaBuffer b,
                      GLuint *width, GLuint *height)
{
   Status stat;

   _glthread_LOCK_MUTEX(_xmesa_lock);
   XSync(b->xm_visual->display, 0); /* added for Chromium */
   /* NOTE(review): the geometry query uses the 'dpy' parameter while
    * XSync uses b->xm_visual->display -- presumably these are the same
    * connection; confirm with callers.
    */
   stat = get_drawable_size(dpy, b->frontxrb->pixmap, width, height);
   _glthread_UNLOCK_MUTEX(_xmesa_lock);

   if (!stat) {
      /* probably querying a window that's recently been destroyed */
      _mesa_warning(NULL, "XGetGeometry failed!\n");
      *width = *height = 1;
   }
}
/**
 * Software fallback for glAccum.  A hardware driver that supports
 * signed 16-bit color channels could implement hardware accumulation
 * operations, but no driver does so at this time.
 */
void
_mesa_accum(struct gl_context *ctx, GLenum op, GLfloat value)
{
   GLint xpos, ypos, width, height;

   /* glAccum without an accum buffer is a warned no-op */
   if (!ctx->DrawBuffer->Attachment[BUFFER_ACCUM].Renderbuffer) {
      _mesa_warning(ctx, "Calling glAccum() without an accumulation buffer");
      return;
   }

   /* conditional rendering may suppress the operation entirely */
   if (!_mesa_check_conditional_render(ctx))
      return;

   /* operate on the scissored drawbuffer region */
   xpos = ctx->DrawBuffer->_Xmin;
   ypos = ctx->DrawBuffer->_Ymin;
   width = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
   height = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;

   switch (op) {
   case GL_ADD:
      /* adding zero is a no-op */
      if (value != 0.0F) {
         accum_scale_or_bias(ctx, value, xpos, ypos, width, height, GL_TRUE);
      }
      break;
   case GL_MULT:
      /* multiplying by one is a no-op */
      if (value != 1.0F) {
         accum_scale_or_bias(ctx, value, xpos, ypos, width, height, GL_FALSE);
      }
      break;
   case GL_ACCUM:
      if (value != 0.0F) {
         accum_or_load(ctx, value, xpos, ypos, width, height, GL_FALSE);
      }
      break;
   case GL_LOAD:
      accum_or_load(ctx, value, xpos, ypos, width, height, GL_TRUE);
      break;
   case GL_RETURN:
      accum_return(ctx, value, xpos, ypos, width, height);
      break;
   default:
      /* enum validation happens in the API layer; reaching here is a bug */
      _mesa_problem(ctx, "invalid mode in _mesa_accum()");
      break;
   }
}
/* Create a __DRIimage view of one plane of a planar (e.g. YUV) parent
 * image.  The plane shares (and references) the parent's buffer object;
 * no pixel data is copied.
 *
 * Returns NULL if the parent is not planar, the plane index is out of
 * range, the plane would extend past the parent's BO, or on OOM.
 */
static __DRIimage *
intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
{
    int width, height, offset, stride, dri_format, index;
    struct intel_image_format *f;
    __DRIimage *image;

    if (parent == NULL || parent->planar_format == NULL)
        return NULL;

    f = parent->planar_format;

    if (plane >= f->nplanes)
        return NULL;

    /* Chroma planes may be subsampled relative to the parent. */
    width = parent->width >> f->planes[plane].width_shift;
    height = parent->height >> f->planes[plane].height_shift;
    dri_format = f->planes[plane].dri_format;
    index = f->planes[plane].buffer_index;
    offset = parent->offsets[index];
    stride = parent->strides[index];

    image = intel_allocate_image(dri_format, loaderPrivate);
    if (image == NULL)
       return NULL;

    /* Reject planes that would run past the end of the parent's BO. */
    if (offset + height * stride > parent->bo->size) {
       _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
       free(image);
       return NULL;
    }

    /* Share and reference the parent's buffer object. */
    image->bo = parent->bo;
    drm_intel_bo_reference(parent->bo);

    image->width = width;
    image->height = height;
    image->pitch = stride;
    image->offset = offset;

    intel_image_warn_if_unaligned(image, __func__);

    return image;
}
/* Query an i915 driver parameter via the DRM GETPARAM ioctl.
 *
 * On success, *value holds the result and true is returned.  On failure
 * false is returned; unexpected errors (anything but -EINVAL, which just
 * means "unknown parameter") are additionally reported via a warning.
 */
static bool
intel_get_param(__DRIscreen *psp, int param, int *value)
{
   struct drm_i915_getparam gp;
   int err;

   memset(&gp, 0, sizeof(gp));
   gp.param = param;
   gp.value = value;

   err = drmCommandWriteRead(psp->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
   if (err == 0)
      return true;

   /* -EINVAL simply means the kernel doesn't know this parameter */
   if (err != -EINVAL)
      _mesa_warning(NULL, "drm_i915_getparam: %d", err);

   return false;
}