/**
 * Find the line number and column for 'pos' within 'string'.
 * Return a copy of the line which contains 'pos'.  Free the line with
 * _mesa_free().
 * \param string  the program string
 * \param pos     the position within the string
 * \param line    returns the line number corresponding to 'pos'.
 * \param col     returns the column number corresponding to 'pos'.
 * \return copy of the line containing 'pos', or NULL on allocation failure.
 */
const GLubyte *
_mesa_find_line_column(const GLubyte *string, const GLubyte *pos,
                       GLint *line, GLint *col)
{
   const GLubyte *lineStart = string;
   const GLubyte *p = string;
   GLubyte *s;
   int len;

   *line = 1;

   /* Walk forward to 'pos', counting newlines and remembering where the
    * current line started.
    */
   while (p != pos) {
      if (*p == (GLubyte) '\n') {
         (*line)++;
         lineStart = p + 1;
      }
      p++;
   }

   *col = (pos - lineStart) + 1;

   /* return copy of this line */
   while (*p != 0 && *p != '\n')
      p++;
   len = p - lineStart;
   s = (GLubyte *) _mesa_malloc(len + 1);
   if (!s)
      return NULL;   /* out of memory */
   _mesa_memcpy(s, lineStart, len);
   s[len] = 0;

   return s;
}
/**
 * Build a new T&L vertex program implementing the fixed-function state
 * described by 'key'.
 * \param key        fixed-function state that selects what to generate
 * \param program    the vertex program object to fill in
 * \param max_temps  number of temporary registers available to the program
 */
static void build_new_tnl_program( const struct state_key *key,
                                   struct gl_vertex_program *program,
                                   GLuint max_temps)
{
   struct tnl_program p;

   /* start from a zeroed builder context */
   _mesa_memset(&p, 0, sizeof(p));
   p.state = key;
   p.program = program;
   p.eye_position = undef;
   p.eye_position_normalized = undef;
   p.eye_normal = undef;
   p.identity = undef;
   p.temp_in_use = 0;
   p.nr_instructions = 16;   /* initial capacity; presumably grown during emission -- TODO confirm */

   /* temp_reserved is a bitmask of temporaries that may NOT be used.
    * If the target has at least as many temps as bits in an int, nothing
    * needs reserving; otherwise mark every temp >= max_temps as reserved.
    */
   if (max_temps >= sizeof(int) * 8)
      p.temp_reserved = 0;
   else
      p.temp_reserved = ~((1<<max_temps)-1);

   /* NOTE(review): allocation result is not checked; a failure here would
    * crash inside build_tnl_program().
    */
   p.program->Base.Instructions =
      _mesa_malloc(sizeof(struct prog_instruction) * p.nr_instructions);
   p.program->Base.String = 0;
   /* reset all program statistics/counters */
   p.program->Base.NumInstructions =
   p.program->Base.NumTemporaries =
   p.program->Base.NumParameters =
   p.program->Base.NumAttributes = p.program->Base.NumAddressRegs = 0;
   p.program->Base.Parameters = _mesa_new_parameter_list();
   p.program->Base.InputsRead = 0;
   p.program->Base.OutputsWritten = 0;

   build_tnl_program( &p );
}
/**
 * Construct the GL_EXTENSIONS string.  Called the first time that
 * glGetString(GL_EXTENSIONS) is called.
 * \return the space-separated extension string (caller frees), or NULL
 *         on allocation failure.
 */
GLubyte *
_mesa_make_extension_string( GLcontext *ctx )
{
   const GLboolean *base = (const GLboolean *) &ctx->Extensions;
   GLuint extStrLen = 0;
   GLubyte *s;
   GLuint i;

   /* first, compute length of the extension string */
   for (i = 0 ; i < Elements(default_extensions) ; i++) {
      if (!default_extensions[i].flag_offset ||
          *(base + default_extensions[i].flag_offset)) {
         extStrLen += (GLuint)_mesa_strlen(default_extensions[i].name) + 1;
      }
   }
   s = (GLubyte *) _mesa_malloc(extStrLen);
   if (!s)
      return NULL;   /* out of memory */

   /* second, build the extension string */
   extStrLen = 0;
   for (i = 0 ; i < Elements(default_extensions) ; i++) {
      if (!default_extensions[i].flag_offset ||
          *(base + default_extensions[i].flag_offset)) {
         GLuint len = (GLuint)_mesa_strlen(default_extensions[i].name);
         _mesa_memcpy(s + extStrLen, default_extensions[i].name, len);
         extStrLen += len;
         s[extStrLen] = (GLubyte) ' ';
         extStrLen++;
      }
   }

   ASSERT(extStrLen > 0);

   /* overwrite the trailing space with the terminator */
   s[extStrLen - 1] = 0;

   return s;
}
/**
 * Detach a shader object from a shader program, shrinking the program's
 * shader list.  Generates GL_INVALID_VALUE if the program or shader name
 * is invalid or the shader is not attached.
 */
void
_mesa_detach_shader(GLcontext *ctx, GLuint program, GLuint shader)
{
   struct gl_shader_program *shProg
      = _mesa_lookup_shader_program(ctx, program);
   GLuint n;
   GLuint i, j;

   if (!shProg) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "glDetachShader(bad program or shader name)");
      return;
   }

   n = shProg->NumShaders;

   for (i = 0; i < n; i++) {
      if (shProg->Shaders[i]->Name == shader) {
         /* found it */
         struct gl_shader **newList;

         /* dereference */
         _mesa_reference_shader(ctx, &shProg->Shaders[i], NULL);

         if (n == 1) {
            /* Detaching the only shader: don't call _mesa_malloc(0), whose
             * return value is implementation-defined and could trigger a
             * bogus GL_OUT_OF_MEMORY error below.
             */
            _mesa_free(shProg->Shaders);
            shProg->Shaders = NULL;
            shProg->NumShaders = 0;
            return;
         }

         /* alloc new, smaller array */
         newList = (struct gl_shader **)
            _mesa_malloc((n - 1) * sizeof(struct gl_shader *));
         if (!newList) {
            _mesa_error(ctx, GL_OUT_OF_MEMORY, "glDetachShader");
            return;
         }
         /* copy entries before and after the detached shader */
         for (j = 0; j < i; j++) {
            newList[j] = shProg->Shaders[j];
         }
         while (++i < n)
            newList[j++] = shProg->Shaders[i];
         _mesa_free(shProg->Shaders);

         shProg->Shaders = newList;
         shProg->NumShaders = n - 1;

#ifdef DEBUG
         /* sanity check: remaining shaders must be valid and referenced */
         {
            for (j = 0; j < shProg->NumShaders; j++) {
               assert(shProg->Shaders[j]->Type == GL_VERTEX_SHADER ||
                      shProg->Shaders[j]->Type == GL_FRAGMENT_SHADER);
               assert(shProg->Shaders[j]->RefCount > 0);
            }
         }
#endif

         return;
      }
   }

   /* not found */
   _mesa_error(ctx, GL_INVALID_VALUE, "glDetachShader(shader not found)");
}
/**
 * Allocate a scratch block and record it in the TNL context's block list
 * so it can be freed later.
 * \return pointer to the new block, or NULL on allocation failure
 *         (nothing is recorded in that case).
 */
static GLubyte *get_space(GLcontext *ctx, GLuint bytes)
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLubyte *space = _mesa_malloc(bytes);
   if (!space)
      return NULL;   /* out of memory; don't record a NULL entry */
   tnl->block[tnl->nr_blocks++] = space;
   return space;
}
/**
 * Reallocate the software back renderbuffer's storage.  Delegates size
 * and format bookkeeping to _FrontRenderbufferStorage(), then allocates
 * a fresh backing buffer of the resulting size.
 * \return GL_TRUE on success, GL_FALSE if allocation fails.
 */
GLboolean
MesaSoftwareRenderer::_BackRenderbufferStorage(GLcontext* ctx,
	struct gl_renderbuffer* render, GLenum internalFormat,
	GLuint width, GLuint height)
{
	struct msr_renderbuffer *mrb = msr_renderbuffer(render);

	/* release the previous backing store before reallocating */
	_mesa_free(render->Data);

	/* updates mrb->Size for the new dimensions/format */
	_FrontRenderbufferStorage(ctx, render, internalFormat, width, height);

	render->Data = _mesa_malloc(mrb->Size);
	if (!render->Data)
		return GL_FALSE;   /* out of memory */
	return GL_TRUE;
}
/**
 * This is called via __DRIscreenRec's createNewDrawable pointer.
 * Allocates and zero-initializes a drawable, then hands it to the
 * driver's CreateBuffer hook.
 * \return the new drawable, or NULL on allocation/driver failure.
 */
static __DRIdrawable *
driCreateNewDrawable(__DRIscreen *psp, const __DRIconfig *config,
                     drm_drawable_t hwDrawable, int renderType,
                     const int *attrs, void *data)
{
    __DRIdrawable *drawable;

    /* Since pbuffers are not yet supported, no drawable attributes are
     * supported either.
     */
    (void) attrs;

    drawable = _mesa_malloc(sizeof *drawable);
    if (drawable == NULL)
	return NULL;

    /* identity / bookkeeping */
    drawable->loaderPrivate = data;
    drawable->hHWDrawable = hwDrawable;
    drawable->refcount = 0;
    drawable->pStamp = NULL;
    drawable->lastStamp = 0;
    drawable->index = 0;

    /* geometry starts out empty */
    drawable->x = 0;
    drawable->y = 0;
    drawable->w = 0;
    drawable->h = 0;

    /* no clip rects yet */
    drawable->numClipRects = 0;
    drawable->numBackClipRects = 0;
    drawable->pClipRects = NULL;
    drawable->pBackClipRects = NULL;

    drawable->vblSeq = 0;
    drawable->vblFlags = 0;

    drawable->driScreenPriv = psp;
    drawable->driContextPriv = &psp->dummyContextPriv;

    if (!(*psp->DriverAPI.CreateBuffer)(psp, drawable, &config->modes,
					renderType == GLX_PIXMAP_BIT)) {
	_mesa_free(drawable);
	return NULL;
    }

    drawable->msc_base = 0;

    /* This special default value is replaced with the configured
     * default value when the drawable is first bound to a direct
     * rendering context.
     */
    drawable->swap_interval = (unsigned)-1;

    return drawable;
}
/* A facility similar to the data caching code above, which aims to
 * prevent identical commands being issued repeatedly.
 *
 * Returns GL_TRUE if the command was emitted to the batchbuffer,
 * GL_FALSE if it was identical to the cached copy and therefore skipped.
 */
GLboolean brw_cached_batch_struct( struct brw_context *brw,
				   const void *data,
				   GLuint sz )
{
   struct brw_cached_batch_item *item = brw->cached_batch_items;
   struct header *newheader = (struct header *)data;

   /* debug mode: bypass the cache and always emit */
   if (brw->emit_state_always) {
      intel_batchbuffer_data(brw->intel.batch, data, sz, IGNORE_CLIPRECTS);
      return GL_TRUE;
   }

   /* look for an existing cache entry with the same opcode */
   while (item) {
      if (item->header->opcode == newheader->opcode) {
	 /* identical payload: nothing to do */
	 if (item->sz == sz && memcmp(item->header, newheader, sz) == 0)
	    return GL_FALSE;
	 /* size changed: re-allocate the cached copy.
	  * NOTE(review): _mesa_malloc result is not checked; an OOM here
	  * would crash in the memcpy below.
	  */
	 if (item->sz != sz) {
	    _mesa_free(item->header);
	    item->header = _mesa_malloc(sz);
	    item->sz = sz;
	 }
	 goto emit;
      }
      item = item->next;
   }

   /* no entry for this opcode yet: create one at the list head.
    * NOTE(review): CALLOC_STRUCT/_mesa_malloc results unchecked here too.
    */
   assert(!item);
   item = CALLOC_STRUCT(brw_cached_batch_item);
   item->header = _mesa_malloc(sz);
   item->sz = sz;
   item->next = brw->cached_batch_items;
   brw->cached_batch_items = item;

 emit:
   /* refresh the cached copy and emit the command */
   memcpy(item->header, newheader, sz);
   intel_batchbuffer_data(brw->intel.batch, data, sz, IGNORE_CLIPRECTS);
   return GL_TRUE;
}
/**
 * Allocate stage's private data (storage for transformed normals).
 * \return GL_TRUE on success, GL_FALSE if the private-data allocation fails.
 */
static GLboolean
alloc_normal_data(GLcontext *ctx, struct tnl_pipeline_stage *stage)
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct normal_stage_data *store;

   stage->privatePtr = _mesa_malloc(sizeof(*store));
   /* NORMAL_STAGE_DATA presumably reads stage->privatePtr, so the NULL
    * check below also covers the malloc failure -- TODO confirm.
    */
   store = NORMAL_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   /* NOTE(review): _mesa_vector4f_alloc's internal allocation is not
    * verified here.
    */
   _mesa_vector4f_alloc( &store->normal, 0, tnl->vb.Size, 32 );
   return GL_TRUE;
}
static GLboolean Parse_PrintInstruction(struct parse_state *parseState, struct prog_instruction *inst) { const GLubyte *str; GLubyte *msg; GLuint len; GLint idx; /* The first argument is a literal string 'just like this' */ if (!Parse_String(parseState, "'")) RETURN_ERROR1("Expected '"); str = parseState->pos; for (len = 0; str[len] != '\''; len++) /* find closing quote */ ; parseState->pos += len + 1; msg = (GLubyte*) _mesa_malloc(len + 1); _mesa_memcpy(msg, str, len); msg[len] = 0; inst->Data = msg; if (Parse_String(parseState, ",")) { /* got an optional register to print */ GLubyte token[100]; GetToken(parseState, token); if (token[0] == 'o') { /* dst reg */ if (!Parse_OutputReg(parseState, &idx)) RETURN_ERROR; inst->SrcReg[0].Index = idx; inst->SrcReg[0].File = PROGRAM_OUTPUT; } else { /* src reg */ if (!Parse_VectorSrc(parseState, &inst->SrcReg[0])) RETURN_ERROR; } } else { inst->SrcReg[0].File = PROGRAM_UNDEFINED; } inst->SrcReg[0].Swizzle = SWIZZLE_NOOP; inst->SrcReg[0].Abs = GL_FALSE; inst->SrcReg[0].Negate = NEGATE_NONE; return GL_TRUE; }
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(GLcontext * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   /* drop any previous backing storage: shared region, GEM BO, or
    * malloc'd system memory */
   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL) {
      dri_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = NULL;
   }
   _mesa_free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
#ifdef I915
      /* On pre-965, stick VBOs in system memory, as we're always doing swtnl
       * with their contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = _mesa_malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return GL_TRUE;
         }
         /* malloc failed: fall through to the GEM buffer path below */
      }
#endif
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
         dri_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}
/** * gl_renderbuffer::AllocStorage() * This is called to allocate the original drawing surface, and * during window resize. */ static GLboolean st_renderbuffer_alloc_storage(GLcontext * ctx, struct gl_renderbuffer *rb, GLenum internalFormat, GLuint width, GLuint height) { struct pipe_context *pipe = ctx->st->pipe; struct st_renderbuffer *strb = st_renderbuffer(rb); enum pipe_format format; if (strb->format != PIPE_FORMAT_NONE) format = strb->format; else format = st_choose_renderbuffer_format(pipe->screen, internalFormat); /* init renderbuffer fields */ strb->Base.Width = width; strb->Base.Height = height; init_renderbuffer_bits(strb, format); strb->defined = GL_FALSE; /* undefined contents now */ if(strb->software) { struct pipe_format_block block; size_t size; _mesa_free(strb->data); assert(strb->format != PIPE_FORMAT_NONE); pf_get_block(strb->format, &block); strb->stride = pf_get_stride(&block, width); size = pf_get_2d_size(&block, strb->stride, height); strb->data = _mesa_malloc(size); return strb->data != NULL; } else { struct pipe_texture template; unsigned surface_usage; /* Free the old surface and texture */ pipe_surface_reference( &strb->surface, NULL ); pipe_texture_reference( &strb->texture, NULL ); /* Setup new texture template. */ memset(&template, 0, sizeof(template));
/**
 * Create a DRI2 drawable: delegates to driCreateNewDrawable() with no
 * DRM drawable handle/attributes, then fills in the DRI2-specific fields.
 * \return the new drawable, or NULL on failure.
 */
static __DRIdrawable *
dri2CreateNewDrawable(__DRIscreen *screen, const __DRIconfig *config,
		      unsigned int drawable_id, unsigned int head, void *data)
{
    __DRIdrawable *pdraw;

    pdraw = driCreateNewDrawable(screen, config, 0, 0, NULL, data);
    if (!pdraw)
    	return NULL;

    pdraw->dri2.drawable_id = drawable_id;
    pdraw->dri2.tail = head;
    /* NOTE(review): this allocation is not checked; a NULL here would be
     * dereferenced later.  Proper cleanup would require tearing down the
     * drawable via the driver API -- worth fixing where that is visible.
     */
    pdraw->pBackClipRects = _mesa_malloc(sizeof *pdraw->pBackClipRects);

    return pdraw;
}
/**
 * Software fallback for loading the color buffer into the accumulation
 * buffer, scaling each component by 'value'.
 */
static void
accum_load(struct st_context *st, GLfloat value,
           GLint xpos, GLint ypos, GLint width, GLint height,
           struct st_renderbuffer *acc_strb,
           struct st_renderbuffer *color_strb)
{
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct pipe_transfer *color_trans;
   size_t stride = acc_strb->stride;
   GLubyte *data = acc_strb->data;
   GLfloat *buf;

   if (ST_DEBUG & DEBUG_FALLBACK)
      debug_printf("%s: fallback processing\n", __FUNCTION__);

   color_trans = st_cond_flush_get_tex_transfer(st, color_strb->texture,
                                                0, 0, 0,
                                                PIPE_TRANSFER_READ, xpos, ypos,
                                                width, height);

   buf = (GLfloat *) _mesa_malloc(width * height * 4 * sizeof(GLfloat));
   if (!buf) {
      /* out of memory: release the transfer and bail */
      screen->tex_transfer_destroy(color_trans);
      return;
   }

   pipe_get_tile_rgba(color_trans, 0, 0, width, height, buf);

   switch (acc_strb->format) {
   case PIPE_FORMAT_R16G16B16A16_SNORM:
      {
         const GLfloat *color = buf;
         int i, j;
         for (i = 0; i < height; i++) {
            /* 8 = four 16-bit components per accum pixel */
            GLshort *acc = (GLshort *) (data + (ypos + i) * stride + xpos * 8);
            for (j = 0; j < width * 4; j++) {
               float val = *color++ * value;
               *acc++ = FLOAT_TO_SHORT(val);
            }
         }
      }
      break;
   default:
      _mesa_problem(NULL, "unexpected format in st_clear_accum_buffer()");
   }

   _mesa_free(buf);
   screen->tex_transfer_destroy(color_trans);
}
/* If the backbuffer is on a videocard, this is extraordinarily slow!
 *
 * Copies the back renderbuffer to the front, one scanline at a time
 * through a bounce buffer.  Returns EGL_FALSE on failure.
 */
static EGLBoolean
fbSwapBuffers(_EGLDriver *drv, EGLDisplay dpy, EGLSurface draw)
{
   fbContext *context = (fbContext *)_eglGetCurrentContext();
   fbSurface *fs = Lookup_fbSurface(draw);
   struct gl_renderbuffer * front_renderbuffer =
      fs->mesa_framebuffer->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
   void *frontBuffer = front_renderbuffer->Data;
   int currentPitch = ((driRenderbuffer *)front_renderbuffer)->pitch;
   void *backBuffer =
      fs->mesa_framebuffer->Attachment[BUFFER_BACK_LEFT].Renderbuffer->Data;

   if (!_eglSwapBuffers(drv, dpy, draw))
      return EGL_FALSE;

   if (context) {
      GLcontext *ctx = context->glCtx;

      if (ctx->Visual.doubleBufferMode) {
         int i;
         int offset = 0;
         char *tmp = _mesa_malloc(currentPitch);
         if (!tmp)
            return EGL_FALSE;   /* out of memory */

         _mesa_notifySwapBuffers(ctx);  /* flush pending rendering commands */

         ASSERT(frontBuffer);
         ASSERT(backBuffer);

         /* scanline-at-a-time copy through the bounce buffer */
         for (i = 0; i < fs->Base.Height; i++) {
            _mesa_memcpy(tmp, (char *) backBuffer + offset, currentPitch);
            _mesa_memcpy((char *) frontBuffer + offset, tmp, currentPitch);
            offset += currentPitch;
         }

         _mesa_free(tmp);
      }
   }
   else {
      /* XXX this shouldn't be an error but we can't handle it for now */
      _mesa_problem(NULL, "fbSwapBuffers: drawable has no context!\n");
      return EGL_FALSE;
   }
   return EGL_TRUE;
}
/**
 * Upload a vertex array into VBO space, tightly packed at dst_stride
 * bytes per element.  If the source stride already matches, upload
 * directly; otherwise repack through a temporary buffer.
 */
static void copy_array_to_vbo_array( struct brw_context *brw,
				     struct brw_vertex_element *element,
				     GLuint dst_stride)
{
   GLuint size = element->count * dst_stride;

   get_space(brw, size, &element->bo, &element->offset);

   if (element->glarray->StrideB == 0) {
      /* constant (non-array) attribute */
      assert(element->count == 1);
      element->stride = 0;
   } else {
      element->stride = dst_stride;
   }

   if (dst_stride == element->glarray->StrideB) {
      /* strides match: one straight copy */
      dri_bo_subdata(element->bo, element->offset, size, element->glarray->Ptr);
   } else {
      /* repack elements into a tightly-strided temporary buffer */
      void *data;
      char *dest;
      const char *src = element->glarray->Ptr;
      int i;

      data = _mesa_malloc(dst_stride * element->count);
      if (!data)
	 return;   /* out of memory: leave the VBO contents undefined */
      dest = data;
      for (i = 0; i < element->count; i++) {
	 memcpy(dest, src, dst_stride);
	 src += element->glarray->StrideB;
	 dest += dst_stride;
      }

      dri_bo_subdata(element->bo, element->offset, size, data);
      _mesa_free(data);
   }
}
/**
 * Triple the hash table's bucket count and redistribute every cached
 * item into the new bucket array.
 */
static void rehash( struct brw_tnl_cache *cache )
{
   struct brw_tnl_cache_item **items;
   struct brw_tnl_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = (struct brw_tnl_cache_item**) _mesa_malloc(size * sizeof(*items));
   if (!items)
      return;   /* out of memory: keep using the existing (smaller) table */
   _mesa_memset(items, 0, size * sizeof(*items));

   /* relink every item into its bucket in the new table */
   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
	 next = c->next;
	 c->next = items[c->hash % size];
	 items[c->hash % size] = c;
      }

   FREE(cache->items);
   cache->items = items;
   cache->size = size;
}
/** * Allocate and initialize a new dispatch table. */ static struct _glapi_table * alloc_dispatch_table(void) { /* Find the larger of Mesa's dispatch table and libGL's dispatch table. * In practice, this'll be the same for stand-alone Mesa. But for DRI * Mesa we do this to accomodate different versions of libGL and various * DRI drivers. */ GLint numEntries = MAX2(_glapi_get_dispatch_table_size(), sizeof(struct _glapi_table) / sizeof(_glapi_proc)); struct _glapi_table *table = (struct _glapi_table *) _mesa_malloc(numEntries * sizeof(_glapi_proc)); if (table) { _glapi_proc *entry = (_glapi_proc *) table; GLint i; for (i = 0; i < numEntries; i++) { entry[i] = (_glapi_proc) generic_nop; } } return table; }
/**
 * Initialize the brw state-tracking machinery: pools, caches, and a
 * private copy of the static state-atom list.
 */
void brw_init_state( struct brw_context *brw )
{
   GLuint i;

   brw_init_pools(brw);
   brw_init_caches(brw);

   /* NOTE(review): allocation result is not checked; a failure here
    * would crash in the memcpy below.
    */
   brw->state.atoms = _mesa_malloc(sizeof(atoms));
   brw->state.nr_atoms = sizeof(atoms)/sizeof(*atoms);
   _mesa_memcpy(brw->state.atoms, atoms, sizeof(atoms));

   /* Patch in a pointer to the dynamic state atom:
    * (NULL entries in the static list are placeholders for it)
    */
   for (i = 0; i < brw->state.nr_atoms; i++)
      if (brw->state.atoms[i] == NULL)
	 brw->state.atoms[i] = &brw->curbe.tracked_state;

   /* seed the dynamic atom from the constant-buffer template */
   _mesa_memcpy(&brw->curbe.tracked_state,
		&brw_constant_buffer,
		sizeof(brw_constant_buffer));
}
/* If the backbuffer is on a videocard, this is extraordinarily slow!
 *
 * Copies the back renderbuffer to the front, one scanline at a time
 * through a bounce buffer.
 */
static void
fbSwapBuffers( __DRIdrawablePrivate *dPriv )
{
   struct gl_framebuffer *mesa_framebuffer =
      (struct gl_framebuffer *)dPriv->driverPrivate;
   struct gl_renderbuffer * front_renderbuffer =
      mesa_framebuffer->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
   void *frontBuffer = front_renderbuffer->Data;
   int currentPitch = ((driRenderbuffer *)front_renderbuffer)->pitch;
   void *backBuffer =
      mesa_framebuffer->Attachment[BUFFER_BACK_LEFT].Renderbuffer->Data;

   if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
      fbContextPtr fbmesa = (fbContextPtr) dPriv->driContextPriv->driverPrivate;
      GLcontext *ctx = fbmesa->glCtx;

      if (ctx->Visual.doubleBufferMode) {
         int i;
         int offset = 0;
         char *tmp = _mesa_malloc(currentPitch);
         if (!tmp)
            return;   /* out of memory: skip the swap copy */

         _mesa_notifySwapBuffers( ctx );  /* flush pending rendering commands */

         ASSERT(frontBuffer);
         ASSERT(backBuffer);

         /* scanline-at-a-time copy through the bounce buffer */
         for (i = 0; i < dPriv->h; i++) {
            _mesa_memcpy(tmp, (char *) backBuffer + offset, currentPitch);
            _mesa_memcpy((char *) frontBuffer + offset, tmp, currentPitch);
            offset += currentPitch;
         }

         _mesa_free(tmp);
      }
   }
   else {
      /* XXX this shouldn't be an error but we can't handle it for now */
      _mesa_problem(NULL, "fbSwapBuffers: drawable has no context!\n");
   }
}
/**
 * Record the current vertex-emit configuration as a reusable fastpath,
 * pushed onto the head of vtx->fastpath.  Silently does nothing if
 * allocation fails (a fastpath is only an optimization).
 */
void _tnl_register_fastpath( struct tnl_clipspace *vtx,
			     GLboolean match_strides )
{
   struct tnl_clipspace_fastpath *fastpath = CALLOC_STRUCT(tnl_clipspace_fastpath);
   GLuint i;

   if (!fastpath)
      return;   /* out of memory */

   fastpath->vertex_size = vtx->vertex_size;
   fastpath->attr_count = vtx->attr_count;
   fastpath->match_strides = match_strides;
   fastpath->func = vtx->emit;
   fastpath->attr = (struct tnl_attr_type *)
      _mesa_malloc(vtx->attr_count * sizeof(fastpath->attr[0]));
   if (!fastpath->attr) {
      _mesa_free(fastpath);
      return;   /* out of memory */
   }

   /* snapshot the per-attribute layout */
   for (i = 0; i < vtx->attr_count; i++) {
      fastpath->attr[i].format = vtx->attr[i].format;
      fastpath->attr[i].stride = vtx->attr[i].inputstride;
      fastpath->attr[i].size = vtx->attr[i].inputsize;
      fastpath->attr[i].offset = vtx->attr[i].vertoffset;
   }

   fastpath->next = vtx->fastpath;
   vtx->fastpath = fastpath;
}
/**
 * Record the current vertex-fetch configuration as a reusable fastpath,
 * pushed onto the head of vf->fastpath.  Silently does nothing if
 * allocation fails (mirrors _tnl_register_fastpath).
 */
void vf_register_fastpath( struct vertex_fetch *vf,
			   GLboolean match_strides )
{
   struct vf_fastpath *fastpath = CALLOC_STRUCT(vf_fastpath);
   GLuint i;

   if (!fastpath)
      return;   /* out of memory */

   fastpath->vertex_stride = vf->vertex_stride;
   fastpath->attr_count = vf->attr_count;
   fastpath->match_strides = match_strides;
   fastpath->func = vf->emit;
   fastpath->attr = (struct vf_attr_type *)
      _mesa_malloc(vf->attr_count * sizeof(fastpath->attr[0]));
   if (!fastpath->attr) {
      _mesa_free(fastpath);
      return;   /* out of memory */
   }

   /* snapshot the per-attribute layout */
   for (i = 0; i < vf->attr_count; i++) {
      fastpath->attr[i].format = vf->attr[i].format;
      fastpath->attr[i].stride = vf->attr[i].inputstride;
      fastpath->attr[i].size = vf->attr[i].inputsize;
      fastpath->attr[i].offset = vf->attr[i].vertoffset;
   }

   fastpath->next = vf->fastpath;
   vf->fastpath = fastpath;
}
/** * Create the per-drawable private driver information. * * \param render_type Type of rendering target. \c GLX_RGBA is the only * type likely to ever be supported for direct-rendering. * \param shared Context with which to share textures, etc. or NULL * * \returns An opaque pointer to the per-context private information on * success, or \c NULL on failure. * * \internal * This function allocates and fills a __DRIcontextPrivateRec structure. It * performs some device independent initialization and passes all the * relevant information to __DriverAPIRec::CreateContext to create the * context. * */ static __DRIcontext * driCreateNewContext(__DRIscreen *psp, const __DRIconfig *config, int render_type, __DRIcontext *shared, drm_context_t hwContext, void *data) { __DRIcontext *pcp; void * const shareCtx = (shared != NULL) ? shared->driverPrivate : NULL; pcp = _mesa_malloc(sizeof *pcp); if (!pcp) return NULL; pcp->driScreenPriv = psp; pcp->driDrawablePriv = NULL; /* When the first context is created for a screen, initialize a "dummy" * context. */ if (!psp->dri2.enabled && !psp->dummyContextPriv.driScreenPriv) { psp->dummyContextPriv.hHWContext = psp->pSAREA->dummy_context; psp->dummyContextPriv.driScreenPriv = psp; psp->dummyContextPriv.driDrawablePriv = NULL; psp->dummyContextPriv.driverPrivate = NULL; /* No other fields should be used! */ } pcp->hHWContext = hwContext; if ( !(*psp->DriverAPI.CreateContext)(&config->modes, pcp, shareCtx) ) { _mesa_free(pcp); return NULL; } return pcp; }
/**
 * glColorTable implementation: select the color table for 'target',
 * validate parameters, (re)allocate storage and load the entries.
 * Proxy targets only record success/failure in the proxy table; they
 * never allocate storage or raise size-related GL errors.
 */
void GLAPIENTRY
_mesa_ColorTable( GLenum target, GLenum internalFormat,
                  GLsizei width, GLenum format, GLenum type,
                  const GLvoid *data )
{
   static const GLfloat one[4] = { 1.0, 1.0, 1.0, 1.0 };
   static const GLfloat zero[4] = { 0.0, 0.0, 0.0, 0.0 };
   GET_CURRENT_CONTEXT(ctx);
   struct gl_texture_unit *texUnit = _mesa_get_current_tex_unit(ctx);
   struct gl_texture_object *texObj = NULL;
   struct gl_color_table *table = NULL;
   GLboolean proxy = GL_FALSE;
   GLint baseFormat;
   const GLfloat *scale = one, *bias = zero;   /* identity scale/bias by default */
   GLint comps;

   ASSERT_OUTSIDE_BEGIN_END_AND_FLUSH(ctx); /* too complex */

   /* Pick the table (and its scale/bias, if any) named by 'target'. */
   switch (target) {
      case GL_SHARED_TEXTURE_PALETTE_EXT:
         table = &ctx->Texture.Palette;
         break;
      case GL_COLOR_TABLE:
         table = &ctx->ColorTable[COLORTABLE_PRECONVOLUTION];
         scale = ctx->Pixel.ColorTableScale[COLORTABLE_PRECONVOLUTION];
         bias = ctx->Pixel.ColorTableBias[COLORTABLE_PRECONVOLUTION];
         break;
      case GL_PROXY_COLOR_TABLE:
         table = &ctx->ProxyColorTable[COLORTABLE_PRECONVOLUTION];
         proxy = GL_TRUE;
         break;
      case GL_TEXTURE_COLOR_TABLE_SGI:
         if (!ctx->Extensions.SGI_texture_color_table) {
            _mesa_error(ctx, GL_INVALID_ENUM, "glColorTable(target)");
            return;
         }
         table = &(texUnit->ColorTable);
         scale = ctx->Pixel.TextureColorTableScale;
         bias = ctx->Pixel.TextureColorTableBias;
         break;
      case GL_PROXY_TEXTURE_COLOR_TABLE_SGI:
         if (!ctx->Extensions.SGI_texture_color_table) {
            _mesa_error(ctx, GL_INVALID_ENUM, "glColorTable(target)");
            return;
         }
         table = &(texUnit->ProxyColorTable);
         proxy = GL_TRUE;
         break;
      case GL_POST_CONVOLUTION_COLOR_TABLE:
         table = &ctx->ColorTable[COLORTABLE_POSTCONVOLUTION];
         scale = ctx->Pixel.ColorTableScale[COLORTABLE_POSTCONVOLUTION];
         bias = ctx->Pixel.ColorTableBias[COLORTABLE_POSTCONVOLUTION];
         break;
      case GL_PROXY_POST_CONVOLUTION_COLOR_TABLE:
         table = &ctx->ProxyColorTable[COLORTABLE_POSTCONVOLUTION];
         proxy = GL_TRUE;
         break;
      case GL_POST_COLOR_MATRIX_COLOR_TABLE:
         table = &ctx->ColorTable[COLORTABLE_POSTCOLORMATRIX];
         scale = ctx->Pixel.ColorTableScale[COLORTABLE_POSTCOLORMATRIX];
         bias = ctx->Pixel.ColorTableBias[COLORTABLE_POSTCOLORMATRIX];
         break;
      case GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE:
         table = &ctx->ProxyColorTable[COLORTABLE_POSTCOLORMATRIX];
         proxy = GL_TRUE;
         break;
      default:
         /* try texture targets */
         {
            struct gl_texture_object *texobj
               = _mesa_select_tex_object(ctx, texUnit, target);
            if (texobj) {
               table = &texobj->Palette;
               proxy = _mesa_is_proxy_texture(target);
            }
            else {
               _mesa_error(ctx, GL_INVALID_ENUM, "glColorTable(target)");
               return;
            }
         }
   }

   assert(table);

   /* validate pixel format/type; GL_INTENSITY is not a legal user format */
   if (!_mesa_is_legal_format_and_type(ctx, format, type) ||
       format == GL_INTENSITY) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glColorTable(format or type)");
      return;
   }

   baseFormat = base_colortab_format(internalFormat);
   if (baseFormat < 0) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glColorTable(internalFormat)");
      return;
   }

   /* width must be zero or a power of two */
   if (width < 0 || (width != 0 && !_mesa_is_pow_two(width))) {
      /* error: proxies record failure silently, real targets raise */
      if (proxy) {
         table->Size = 0;
         table->InternalFormat = (GLenum) 0;
         table->_BaseFormat = (GLenum) 0;
      }
      else {
         _mesa_error(ctx, GL_INVALID_VALUE, "glColorTable(width=%d)", width);
      }
      return;
   }

   if (width > (GLsizei) ctx->Const.MaxColorTableSize) {
      if (proxy) {
         table->Size = 0;
         table->InternalFormat = (GLenum) 0;
         table->_BaseFormat = (GLenum) 0;
      }
      else {
         _mesa_error(ctx, GL_TABLE_TOO_LARGE, "glColorTable(width)");
      }
      return;
   }

   table->Size = width;
   table->InternalFormat = internalFormat;
   table->_BaseFormat = (GLenum) baseFormat;

   comps = _mesa_components_in_format(table->_BaseFormat);
   assert(comps > 0);  /* error should have been caught sooner */

   if (!proxy) {
      /* free any old table data, then allocate float + ubyte copies */
      _mesa_free_colortable_data(table);

      if (width > 0) {
         table->TableF = (GLfloat *) _mesa_malloc(comps * width * sizeof(GLfloat));
         table->TableUB = (GLubyte *) _mesa_malloc(comps * width * sizeof(GLubyte));

         if (!table->TableF || !table->TableUB) {
            _mesa_error(ctx, GL_OUT_OF_MEMORY, "glColorTable");
            return;
         }

         /* convert/store the user data with per-channel scale and bias */
         store_colortable_entries(ctx, table,
                                  0, width,  /* start, count */
                                  format, type, data,
                                  scale[0], bias[0],
                                  scale[1], bias[1],
                                  scale[2], bias[2],
                                  scale[3], bias[3]);
      }
   } /* proxy */

   /* do this after the table's Type and Format are set */
   set_component_sizes(table);

   if (texObj || target == GL_SHARED_TEXTURE_PALETTE_EXT) {
      /* texture object palette, texObj==NULL means the shared palette */
      if (ctx->Driver.UpdateTexturePalette) {
         (*ctx->Driver.UpdateTexturePalette)( ctx, texObj );
      }
   }

   ctx->NewState |= _NEW_PIXEL;
}
/**
 * Called via glMapBufferRange().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 *
 * \return pointer to the mapped range, or NULL if the buffer has no
 *         backing storage.
 */
static void *
intel_bufferobj_map_range(GLcontext * ctx,
			  GLenum target, GLintptr offset, GLsizeiptr length,
			  GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   /* system-memory buffers (pre-965 VBO path) map trivially */
   if (intel_obj->sys_buffer) {
      obj->Pointer = intel_obj->sys_buffer + offset;
      return obj->Pointer;
   }

   /* copy-on-write: break sharing with a region before writing */
   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
      intelFlush(ctx);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = dri_bo_alloc(intel->bufmgr, "bufferobj",
				       intel_obj->Base.Size, 64);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
	 /* explicit-flush path: stage writes in malloc'd memory.
	  * NOTE(review): allocation result is not checked here.
	  */
	 intel_obj->range_map_buffer = _mesa_malloc(length);
	 obj->Pointer = intel_obj->range_map_buffer;
      } else {
	 /* stage writes in a temporary BO, blitted back at unmap time */
	 intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
						      "range map",
						      length, 64);
	 if (!(access & GL_MAP_READ_BIT) &&
	     intel->intelScreen->kernel_exec_fencing) {
	    drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
	    intel_obj->mapped_gtt = GL_TRUE;
	 } else {
	    drm_intel_bo_map(intel_obj->range_map_bo,
			     (access & GL_MAP_WRITE_BIT) != 0);
	    intel_obj->mapped_gtt = GL_FALSE;
	 }
	 obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   /* ordinary mapping of the real BO; GTT maps avoid CPU-cache flushes
    * for write-only access when the kernel supports execution fencing */
   if (!(access & GL_MAP_READ_BIT) &&
       intel->intelScreen->kernel_exec_fencing) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
/**
 * This isn't terribly efficient.  If a driver really has combined
 * depth/stencil buffers the driver should implement an optimized
 * CopyPixels function.
 *
 * Copies a width x height region of depth and/or stencil values from
 * (srcX, srcY) to (destX, destY), honoring pixel zoom, stencil
 * shift/offset/map, and depth scale/bias.  When source and destination
 * overlap, the source region is snapshotted into temporary buffers first.
 */
static void
copy_depth_stencil_pixels(GLcontext *ctx,
                          const GLint srcX, const GLint srcY,
                          const GLint width, const GLint height,
                          const GLint destX, const GLint destY)
{
   struct gl_renderbuffer *stencilReadRb, *depthReadRb, *depthDrawRb;
   GLint sy, dy, stepy;
   GLint j;
   GLstencil *tempStencilImage = NULL, *stencilPtr = NULL;
   GLfloat *tempDepthImage = NULL, *depthPtr = NULL;
   const GLfloat depthScale = ctx->DrawBuffer->_DepthMaxF;
   const GLuint stencilMask = ctx->Stencil.WriteMask[0];
   const GLboolean zoom = ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F;
   const GLboolean shiftOrOffset
      = ctx->Pixel.IndexShift || ctx->Pixel.IndexOffset;
   const GLboolean scaleOrBias
      = ctx->Pixel.DepthScale != 1.0 || ctx->Pixel.DepthBias != 0.0;
   GLint overlapping;

   depthDrawRb = ctx->DrawBuffer->_DepthBuffer;
   depthReadRb = ctx->ReadBuffer->_DepthBuffer;
   stencilReadRb = ctx->ReadBuffer->_StencilBuffer;

   ASSERT(depthDrawRb);
   ASSERT(depthReadRb);
   ASSERT(stencilReadRb);

   /* Determine if copy should be bottom-to-top or top-to-bottom */
   if (srcY < destY) {
      /* top-down max-to-min */
      sy = srcY + height - 1;
      dy = destY + height - 1;
      stepy = -1;
   }
   else {
      /* bottom-up min-to-max */
      sy = srcY;
      dy = destY;
      stepy = 1;
   }

   if (ctx->DrawBuffer == ctx->ReadBuffer) {
      overlapping = regions_overlap(srcX, srcY, destX, destY, width, height,
                                    ctx->Pixel.ZoomX, ctx->Pixel.ZoomY);
   }
   else {
      overlapping = GL_FALSE;
   }

   if (overlapping) {
      GLint ssy = sy;

      if (stencilMask != 0x0) {
         tempStencilImage
            = (GLstencil *) _mesa_malloc(width * height * sizeof(GLstencil));
         if (!tempStencilImage) {
            _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCopyPixels");
            return;
         }
         /* get copy of stencil pixels */
         stencilPtr = tempStencilImage;
         for (j = 0; j < height; j++, ssy += stepy) {
            _swrast_read_stencil_span(ctx, stencilReadRb,
                                      width, srcX, ssy, stencilPtr);
            stencilPtr += width;
         }
         stencilPtr = tempStencilImage;
      }

      if (ctx->Depth.Mask) {
         tempDepthImage
            = (GLfloat *) _mesa_malloc(width * height * sizeof(GLfloat));
         if (!tempDepthImage) {
            _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCopyPixels");
            _mesa_free(tempStencilImage);
            return;
         }
         /* Restart at the first source row: the stencil loop above has
          * already advanced ssy past the source region, so reusing it
          * unreset would snapshot the wrong rows.
          */
         ssy = sy;
         /* get copy of depth pixels */
         depthPtr = tempDepthImage;
         for (j = 0; j < height; j++, ssy += stepy) {
            _swrast_read_depth_span_float(ctx, depthReadRb,
                                          width, srcX, ssy, depthPtr);
            depthPtr += width;
         }
         depthPtr = tempDepthImage;
      }
   }

   /* main copy loop, one row at a time */
   for (j = 0; j < height; j++, sy += stepy, dy += stepy) {
      if (stencilMask != 0x0) {
         GLstencil stencil[MAX_WIDTH];

         /* Get stencil values */
         if (overlapping) {
            _mesa_memcpy(stencil, stencilPtr, width * sizeof(GLstencil));
            stencilPtr += width;
         }
         else {
            _swrast_read_stencil_span(ctx, stencilReadRb,
                                      width, srcX, sy, stencil);
         }

         /* Apply shift, offset, look-up table */
         if (shiftOrOffset) {
            _mesa_shift_and_offset_stencil(ctx, width, stencil);
         }
         if (ctx->Pixel.MapStencilFlag) {
            _mesa_map_stencil(ctx, width, stencil);
         }

         /* Write values */
         if (zoom) {
            _swrast_write_zoomed_stencil_span(ctx, destX, destY, width,
                                              destX, dy, stencil);
         }
         else {
            _swrast_write_stencil_span( ctx, width, destX, dy, stencil );
         }
      }

      if (ctx->Depth.Mask) {
         GLfloat depth[MAX_WIDTH];
         GLuint zVals32[MAX_WIDTH];
         GLushort zVals16[MAX_WIDTH];
         GLvoid *zVals;
         GLuint zBytes;

         /* get depth values */
         if (overlapping) {
            _mesa_memcpy(depth, depthPtr, width * sizeof(GLfloat));
            depthPtr += width;
         }
         else {
            _swrast_read_depth_span_float(ctx, depthReadRb,
                                          width, srcX, sy, depth);
         }

         /* scale & bias */
         if (scaleOrBias) {
            _mesa_scale_and_bias_depth(ctx, width, depth);
         }

         /* convert to integer Z values */
         if (depthDrawRb->DataType == GL_UNSIGNED_SHORT) {
            GLint k;
            for (k = 0; k < width; k++)
               zVals16[k] = (GLushort) (depth[k] * depthScale);
            zVals = zVals16;
            zBytes = 2;
         }
         else {
            GLint k;
            for (k = 0; k < width; k++)
               zVals32[k] = (GLuint) (depth[k] * depthScale);
            zVals = zVals32;
            zBytes = 4;
         }

         /* Write values */
         if (zoom) {
            _swrast_write_zoomed_z_span(ctx, destX, destY, width,
                                        destX, dy, zVals);
         }
         else {
            _swrast_put_row(ctx, depthDrawRb, width, destX, dy, zVals, zBytes);
         }
      }
   }

   if (tempStencilImage)
      _mesa_free(tempStencilImage);

   if (tempDepthImage)
      _mesa_free(tempDepthImage);
}
/**
 * Software fallback for glAccum(GL_RETURN): multiply the accumulation
 * buffer contents by 'value' and write the clamped result into the
 * color renderbuffer, honoring the current color writemask.
 *
 * \param ctx         the GL context
 * \param value       the GL_RETURN scale factor
 * \param xpos, ypos  lower-left corner of the region
 * \param width, height  region size in pixels
 * \param acc_strb    accumulation renderbuffer (read)
 * \param color_strb  color renderbuffer (written)
 */
static void
accum_return(GLcontext *ctx, GLfloat value,
             GLint xpos, GLint ypos, GLint width, GLint height,
             struct st_renderbuffer *acc_strb,
             struct st_renderbuffer *color_strb)
{
   struct pipe_context *pipe = ctx->st->pipe;
   struct pipe_screen *screen = pipe->screen;
   const GLubyte *colormask = ctx->Color.ColorMask;
   enum pipe_transfer_usage usage;
   struct pipe_transfer *color_trans;
   size_t stride = acc_strb->stride;
   const GLubyte *data = acc_strb->data;
   GLfloat *buf;

   if (ST_DEBUG & DEBUG_FALLBACK)
      debug_printf("%s: fallback processing\n", __FUNCTION__);

   buf = (GLfloat *) _mesa_malloc(width * height * 4 * sizeof(GLfloat));
   if (!buf) {
      /* Fix: the allocation result was previously used unchecked,
       * crashing in pipe_get_tile_rgba() on out-of-memory.
       */
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glAccum");
      return;
   }

   /* If any channel is masked off we must read back the existing colors
    * so masked channels can be written back unchanged; otherwise a
    * write-only transfer suffices.
    */
   if (!colormask[0] || !colormask[1] || !colormask[2] || !colormask[3])
      usage = PIPE_TRANSFER_READ_WRITE;
   else
      usage = PIPE_TRANSFER_WRITE;

   color_trans = st_cond_flush_get_tex_transfer(st_context(ctx),
                                                color_strb->texture,
                                                0, 0, 0,
                                                usage,
                                                xpos, ypos,
                                                width, height);

   if (usage & PIPE_TRANSFER_READ)
      pipe_get_tile_rgba(color_trans, 0, 0, width, height, buf);

   switch (acc_strb->format) {
   case PIPE_FORMAT_R16G16B16A16_SNORM:
      {
         GLfloat *color = buf;
         int i, j, ch;
         for (i = 0; i < height; i++) {
            /* 8 bytes per accum texel: four 16-bit signed components */
            const GLshort *acc = (const GLshort *)
               (data + (ypos + i) * stride + xpos * 8);
            for (j = 0; j < width; j++) {
               for (ch = 0; ch < 4; ch++) {
                  if (colormask[ch]) {
                     GLfloat val = SHORT_TO_FLOAT(*acc * value);
                     *color = CLAMP(val, 0.0f, 1.0f);
                  }
                  else {
                     /* channel masked off - leave dest color unchanged */
                  }
                  ++acc;
                  ++color;
               }
            }
         }
      }
      break;
   default:
      /* Fix: the message previously named st_clear_accum_buffer() */
      _mesa_problem(NULL, "unexpected format in accum_return()");
   }

   pipe_put_tile_rgba(color_trans, 0, 0, width, height, buf);

   _mesa_free(buf);
   screen->tex_transfer_destroy(color_trans);
}
/** * Create a drawing surface which can be directly displayed on a screen. */ static EGLSurface fbCreateScreenSurfaceMESA(_EGLDriver *drv, EGLDisplay dpy, EGLConfig cfg, const EGLint *attrib_list) { _EGLConfig *config = _eglLookupConfig(drv, dpy, cfg); fbDisplay *display = Lookup_fbDisplay(dpy); fbSurface *surface; EGLSurface surf; GLvisual vis; GLcontext *ctx = NULL; /* this should be OK */ int origin, bytesPerPixel; int width, height, stride; surface = (fbSurface *) malloc(sizeof(*surface)); if (!surface) { return EGL_NO_SURFACE; } /* init base class, error check, etc. */ surf = _eglInitScreenSurface(&surface->Base, drv, dpy, cfg, attrib_list); if (surf == EGL_NO_SURFACE) { free(surface); return EGL_NO_SURFACE; } /* convert EGLConfig to GLvisual */ _eglConfigToContextModesRec(config, &vis); /* create Mesa framebuffer */ surface->mesa_framebuffer = _mesa_create_framebuffer(&vis); if (!surface->mesa_framebuffer) { free(surface); _eglRemoveSurface(&surface->Base); return EGL_NO_SURFACE; } width = surface->Base.Width; height = surface->Base.Height; bytesPerPixel = vis.rgbBits / 8; stride = width * bytesPerPixel; origin = 0; /* front color renderbuffer */ { driRenderbuffer *drb = driNewRenderbuffer(GL_RGBA, display->pFB, bytesPerPixel, origin, stride, NULL); fbSetSpanFunctions(drb, &vis); _mesa_add_renderbuffer(surface->mesa_framebuffer, BUFFER_FRONT_LEFT, &drb->Base); } /* back color renderbuffer */ if (vis.doubleBufferMode) { GLubyte *backBuf = _mesa_malloc(stride * height); driRenderbuffer *drb = driNewRenderbuffer(GL_RGBA, backBuf, bytesPerPixel, origin, stride, NULL); fbSetSpanFunctions(drb, &vis); _mesa_add_renderbuffer(surface->mesa_framebuffer, BUFFER_BACK_LEFT, &drb->Base); } /* other renderbuffers- software based */ _mesa_add_soft_renderbuffers(surface->mesa_framebuffer, GL_FALSE, /* color */ vis.haveDepthBuffer, vis.haveStencilBuffer, vis.haveAccumBuffer, GL_FALSE, /* alpha */ GL_FALSE /* aux */); _mesa_resize_framebuffer(ctx, surface->mesa_framebuffer, 
width, height); return surf; }
/* Adjust primitives, indices and vertex definitions so that min_index * becomes zero. There are lots of reasons for wanting to do this, eg: * * Software tnl: * - any time min_index != 0, otherwise unused vertices lower than * min_index will be transformed. * * Hardware tnl: * - if ib != NULL and min_index != 0, otherwise vertices lower than * min_index will be uploaded. Requires adjusting index values. * * - if ib == NULL and min_index != 0, just for convenience so this doesn't * have to be handled within the driver. * * Hardware tnl with VBO support: * - as above, but only when vertices are not (all?) in VBO's. * - can't save time by trying to upload half a vbo - typically it is * all or nothing. */ void vbo_rebase_prims( GLcontext *ctx, const struct gl_client_array *arrays[], const struct _mesa_prim *prim, GLuint nr_prims, const struct _mesa_index_buffer *ib, GLuint min_index, GLuint max_index, vbo_draw_func draw ) { struct gl_client_array tmp_arrays[VERT_ATTRIB_MAX]; const struct gl_client_array *tmp_array_pointers[VERT_ATTRIB_MAX]; struct _mesa_index_buffer tmp_ib; struct _mesa_prim *tmp_prims = NULL; void *tmp_indices = NULL; GLuint i; assert(min_index != 0); if (0) _mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index); /* XXX this path is disabled for now. * There's rendering corruption in some apps when it's enabled. */ if (0 && ib && ctx->Extensions.ARB_draw_elements_base_vertex) { /* If we can just tell the hardware or the TNL to interpret our * indices with a different base, do so. */ tmp_prims = (struct _mesa_prim *)_mesa_malloc(sizeof(*prim) * nr_prims); for (i = 0; i < nr_prims; i++) { tmp_prims[i] = prim[i]; tmp_prims[i].basevertex -= min_index; } prim = tmp_prims; } else if (ib) { /* Unfortunately need to adjust each index individually. 
*/ GLboolean map_ib = ib->obj->Name && !ib->obj->Pointer; void *ptr; if (map_ib) ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, ib->obj); ptr = ADD_POINTERS(ib->obj->Pointer, ib->ptr); /* Some users might prefer it if we translated elements to * GLuints here. Others wouldn't... */ switch (ib->type) { case GL_UNSIGNED_INT: tmp_indices = rebase_GLuint( ptr, ib->count, min_index ); break; case GL_UNSIGNED_SHORT: tmp_indices = rebase_GLushort( ptr, ib->count, min_index ); break; case GL_UNSIGNED_BYTE: tmp_indices = rebase_GLubyte( ptr, ib->count, min_index ); break; } if (map_ib) ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, ib->obj); tmp_ib.obj = ctx->Shared->NullBufferObj; tmp_ib.ptr = tmp_indices; tmp_ib.count = ib->count; tmp_ib.type = ib->type; ib = &tmp_ib; } else { /* Otherwise the primitives need adjustment. */ tmp_prims = (struct _mesa_prim *)_mesa_malloc(sizeof(*prim) * nr_prims); for (i = 0; i < nr_prims; i++) { /* If this fails, it could indicate an application error: */ assert(prim[i].start >= min_index); tmp_prims[i] = prim[i]; tmp_prims[i].start -= min_index; } prim = tmp_prims; } /* Just need to adjust the pointer values on each incoming array. * This works for VBO and non-vbo rendering and shouldn't pesimize * VBO-based upload schemes. However this may still not be a fast * path for hardware tnl for VBO based rendering as most machines * will be happier if you just specify a starting vertex value in * each primitive. * * For drivers with hardware tnl, you only want to do this if you * are forced to, eg non-VBO indexed rendering with start != 0. */ for (i = 0; i < VERT_ATTRIB_MAX; i++) { tmp_arrays[i] = *arrays[i]; tmp_arrays[i].Ptr += min_index * tmp_arrays[i].StrideB; tmp_array_pointers[i] = &tmp_arrays[i]; } /* Re-issue the draw call. */ draw( ctx, tmp_array_pointers, prim, nr_prims, ib, GL_TRUE, 0, max_index - min_index ); if (tmp_indices) _mesa_free(tmp_indices); if (tmp_prims) _mesa_free(tmp_prims); }
/** * The PRINT instruction is Mesa-specific and is meant as a debugging aid for * the vertex program developer. * The NV_vertex_program extension grammar is modified as follows: * * <instruction> ::= <ARL-instruction> * | ... * | <PRINT-instruction> * * <PRINT-instruction> ::= "PRINT" <string literal> * | "PRINT" <string literal> "," <srcReg> * | "PRINT" <string literal> "," <dstReg> */ static GLboolean Parse_PrintInstruction(struct parse_state *parseState, struct vp_instruction *inst) { const GLubyte *str; GLubyte *msg; GLuint len; GLubyte token[100]; struct vp_src_register *srcReg = &inst->SrcReg[0]; inst->Opcode = VP_OPCODE_PRINT; inst->StringPos = parseState->curLine - parseState->start; /* The first argument is a literal string 'just like this' */ if (!Parse_String(parseState, "'")) RETURN_ERROR; str = parseState->pos; for (len = 0; str[len] != '\''; len++) /* find closing quote */ ; parseState->pos += len + 1; msg = _mesa_malloc(len + 1); _mesa_memcpy(msg, str, len); msg[len] = 0; inst->Data = msg; /* comma */ if (Parse_String(parseState, ",")) { /* The second argument is a register name */ if (!Peek_Token(parseState, token)) RETURN_ERROR; srcReg->RelAddr = GL_FALSE; srcReg->Negate = GL_FALSE; srcReg->Swizzle[0] = 0; srcReg->Swizzle[1] = 1; srcReg->Swizzle[2] = 2; srcReg->Swizzle[3] = 3; /* Register can be R<n>, c[n], c[n +/- offset], a named vertex attrib, * or an o[n] output register. 
*/ if (token[0] == 'R') { srcReg->File = PROGRAM_TEMPORARY; if (!Parse_TempReg(parseState, &srcReg->Index)) RETURN_ERROR; } else if (token[0] == 'c') { srcReg->File = PROGRAM_ENV_PARAM; if (!Parse_ParamReg(parseState, srcReg)) RETURN_ERROR; } else if (token[0] == 'v') { srcReg->File = PROGRAM_INPUT; if (!Parse_AttribReg(parseState, &srcReg->Index)) RETURN_ERROR; } else if (token[0] == 'o') { srcReg->File = PROGRAM_OUTPUT; if (!Parse_OutputReg(parseState, &srcReg->Index)) RETURN_ERROR; } else { RETURN_ERROR2("Bad source register name", token); } } else { srcReg->File = 0; } /* semicolon */ if (!Parse_String(parseState, ";")) RETURN_ERROR; return GL_TRUE; }