Example No. 1
void __indirect_glArrayElement(GLint i)
{
    __GLXcontext *gc = __glXGetCurrentContext();
    __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
    __GLXvertArrayState *va = &state->vertArray;
    GLint j;


    if (IS_TEXARRAY_ENABLED(state, 0)) {
	(*va->texCoord[0].proc)(va->texCoord[0].ptr+i*va->texCoord[0].skip);
    }

    /* Multitexturing is handled specially because the protocol
     * requires an extra parameter.
     */
    for (j=1; j<__GLX_MAX_TEXTURE_UNITS; ++j) {
	if (IS_TEXARRAY_ENABLED(state, j)) {
	    (*va->texCoord[j].mtex_proc)(GL_TEXTURE0 + j, va->texCoord[j].ptr+i*va->texCoord[j].skip);
	}
    }

    for ( j = 0 ; j < __GLX_MAX_ARRAYS ; j++ ) {
	if (IS_ARRAY_ENABLED_BY_INDEX(state, j)) {
	    (*va->arrays[ j ].proc)(va->arrays[ j ].ptr+i*va->arrays[ j ].skip);
	}
    }
}
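For reference, a minimal caller-side sketch of the path above (standard OpenGL 1.1 client-array usage; the helper name and vertex data are illustrative, not part of the library):

#include <GL/gl.h>

/* Hypothetical caller: with a vertex array enabled, each glArrayElement(i)
 * dereferences the enabled client arrays at index i and emits one vertex,
 * which the indirect dispatch above converts into GLX protocol. */
static void draw_one_triangle(void)
{
    static const GLfloat verts[3][3] = {
        { 0.0f, 0.0f, 0.0f },
        { 1.0f, 0.0f, 0.0f },
        { 0.0f, 1.0f, 0.0f },
    };

    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, verts);

    glBegin(GL_TRIANGLES);
    glArrayElement(0);
    glArrayElement(1);
    glArrayElement(2);
    glEnd();

    glDisableClientState(GL_VERTEX_ARRAY);
}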
Example No. 2
void __indirect_glIndexPointer( GLenum type, GLsizei stride,
				const GLvoid * pointer )
{
    uint16_t opcode;
    __GLXcontext *gc = __glXGetCurrentContext();
    __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
    struct array_state_vector * arrays = state->array_state;
    struct array_state * a;


    if (stride < 0) {
        __glXSetError(gc, GL_INVALID_VALUE);
        return;
    }
    
    switch ( type ) {
    case GL_UNSIGNED_BYTE:	opcode = X_GLrop_Indexubv; break;
    case GL_SHORT:		opcode = X_GLrop_Indexsv; break;
    case GL_INT:		opcode = X_GLrop_Indexiv; break;
    case GL_FLOAT:		opcode = X_GLrop_Indexfv; break;
    case GL_DOUBLE:		opcode = X_GLrop_Indexdv; break;
    default:
        __glXSetError(gc, GL_INVALID_ENUM);
        return;
    }

    a = get_array_entry( arrays, GL_INDEX_ARRAY, 0 );
    assert( a != NULL );
    COMMON_ARRAY_DATA_INIT( a, pointer, type, stride, 1, GL_FALSE, 4,
			    opcode );

    if ( a->enabled ) {
	arrays->array_info_cache_valid = GL_FALSE;
    }
}
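A minimal caller-side sketch for the GL_UNSIGNED_BYTE case handled above (standard OpenGL 1.1 usage; the helper name and data are illustrative only):

#include <GL/gl.h>

/* Hypothetical caller: a packed color-index array; stride 0 means the
 * elements are tightly packed (one GLubyte per element). */
static const GLubyte palette_indices[4] = { 0, 1, 2, 3 };

static void set_index_array(void)
{
    glEnableClientState(GL_INDEX_ARRAY);
    glIndexPointer(GL_UNSIGNED_BYTE, 0, palette_indices);
}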
Example No. 3
void
__glXSendError(Display * dpy, int errorCode, unsigned long resourceID,
               unsigned long minorCode, bool coreX11error)
{
   XExtDisplayInfo *info = __glXFindDisplay(dpy);
   GLXContext gc = __glXGetCurrentContext();
   xError error;

   LockDisplay(dpy);

   error.type = X_Error;

   if (coreX11error) {
      error.errorCode = errorCode;
   }
   else {
      error.errorCode = info->codes->first_error + errorCode;
   }

   error.sequenceNumber = dpy->request;
   error.resourceID = resourceID;
   error.minorCode = minorCode;
   error.majorCode = gc ? gc->majorOpcode : 0;

   _XError(dpy, &error);

   UnlockDisplay(dpy);
}
Example No. 4
void __indirect_glGetVertexAttribivARB( GLuint index, GLenum pname,
					GLint * params )
{
    __GLXcontext * const gc = __glXGetCurrentContext();
    Display * const dpy = gc->currentDpy;
    __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
    xGLXSingleReply reply;


    /* 1303 is the GLX vendor-private opcode for GetVertexAttribivARB. */
    get_vertex_attrib( gc, 1303, index, pname, (xReply *) & reply );

    if ( reply.size != 0 ) {
	if ( ! get_attrib_array_data( state, index, pname, params ) ) {
	    if (reply.size == 1) {
		*params = (GLint) reply.pad3;
	    } 
	    else {
		_XRead(dpy, (void *) params, 4 * reply.size);
	    }
	}
    }

    UnlockDisplay(dpy);
    SyncHandle();
}
Example No. 5
void
emit_DrawArrays_old( GLenum mode, GLint first, GLsizei count )
{
    __GLXcontext *gc = __glXGetCurrentContext();
    const __GLXattribute * state = 
       (const __GLXattribute *)(gc->client_state_private);
    struct array_state_vector * arrays = state->array_state;

    GLubyte * pc;
    size_t elements_per_request;
    unsigned total_requests = 0;
    unsigned i;
    size_t total_sent = 0;


    pc = emit_DrawArrays_header_old( gc, arrays, & elements_per_request,
				     & total_requests, mode, count);

    
    /* Write the arrays.
     */

    if ( total_requests == 0 ) {
	assert( elements_per_request >= count );

	for ( i = 0 ; i < count ; i++ ) {
	    pc = emit_element_old( pc, arrays, i + first );
	}

	assert( pc <= gc->bufEnd );

	gc->pc = pc;
	if ( gc->pc > gc->limit ) {
	    (void) __glXFlushRenderBuffer(gc, gc->pc);
	}
    }
    else {
	unsigned req;


	for ( req = 2 ; req <= total_requests ; req++ ) {
	    if ( count < elements_per_request ) {
		elements_per_request = count;
	    }

	    pc = gc->pc;
	    for ( i = 0 ; i < elements_per_request ; i++ ) {
		pc = emit_element_old( pc, arrays, i + first );
	    }

	    first += elements_per_request;

	    total_sent += (size_t) (pc - gc->pc);
	    __glXSendLargeChunk( gc, req, total_requests, gc->pc,
				 pc - gc->pc );

	    count -= elements_per_request;
	}
    }
}
Example No. 6
void __indirect_glGetVertexAttribdvARB( GLuint index, GLenum pname,
					GLdouble * params )
{
    __GLXcontext * const gc = __glXGetCurrentContext();
    Display * const dpy = gc->currentDpy;
    __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
    xGLXSingleReply reply;


    /* 1301 is the GLX vendor-private opcode for GetVertexAttribdvARB. */
    get_vertex_attrib( gc, 1301, index, pname, (xReply *) & reply );

    if ( reply.size != 0 ) {
	GLintptr data;


	if ( get_attrib_array_data( state, index, pname, & data ) ) {
	    *params = (GLdouble) data;
	}
	else {
	    if (reply.size == 1) {
		(void) memcpy( params, & reply.pad3, sizeof( GLdouble ) );
	    } 
	    else {
		_XRead(dpy, (void *) params, 8 * reply.size);
	    }
	}
    }

    UnlockDisplay(dpy);
    SyncHandle();
}
Example No. 7
void
__indirect_glPushClientAttrib(GLuint mask)
{
   __GLXcontext *gc = __glXGetCurrentContext();
   __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
   __GLXattribute **spp = gc->attributes.stackPointer, *sp;

   if (spp < &gc->attributes.stack[__GL_CLIENT_ATTRIB_STACK_DEPTH]) {
      if (!(sp = *spp)) {
         sp = (__GLXattribute *) Xmalloc(sizeof(__GLXattribute));
         *spp = sp;
      }
      sp->mask = mask;
      gc->attributes.stackPointer = spp + 1;
      if (mask & GL_CLIENT_PIXEL_STORE_BIT) {
         sp->storePack = state->storePack;
         sp->storeUnpack = state->storeUnpack;
      }
      if (mask & GL_CLIENT_VERTEX_ARRAY_BIT) {
         __glXPushArrayState(state);
      }
   }
   else {
      __glXSetError(gc, GL_STACK_OVERFLOW);
      return;
   }
}
Example No. 8
/*
** Setup for sending a GLX command on dpy.  Make sure the extension is
** initialized.  Try to avoid calling __glXInitialize as it's kinda slow.
*/
_X_HIDDEN CARD8
__glXSetupForCommand(Display * dpy)
{
   struct glx_context *gc;
   struct glx_display *priv;

   /* If this thread has a current context, flush its rendering commands */
   gc = __glXGetCurrentContext();
   if (gc->currentDpy) {
      /* Flush rendering buffer of the current context, if any */
      (void) __glXFlushRenderBuffer(gc, gc->pc);

      if (gc->currentDpy == dpy) {
         /* Use opcode from gc because it's right */
         return gc->majorOpcode;
      }
      else {
         /*
          ** Have to get info about argument dpy because it might be to
          ** a different server
          */
      }
   }

   /* Forced to lookup extension via the slow initialize route */
   priv = __glXInitialize(dpy);
   if (!priv) {
      return 0;
   }
   return priv->majorOpcode;
}
Example No. 9
/**
 * I don't have 100% confidence that this is correct.  The different rules
 * about whether or not generic vertex attributes alias "classic" vertex
 * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program,
 * ARB_vertex_shader, and NV_vertex_program are a bit confusing.  My
 * feeling is that the client-side doesn't have to worry about it.  The
 * client just sends all the data to the server and lets the server deal
 * with it.
 */
void __indirect_glVertexAttribPointerNV( GLuint index, GLint size,
					 GLenum type, GLsizei stride,
					 const GLvoid * pointer)
{
    __GLXcontext *gc = __glXGetCurrentContext();
    GLboolean normalized = GL_FALSE;


    switch( type ) {
    case GL_UNSIGNED_BYTE:
	if ( size != 4 ) {
	    __glXSetError(gc, GL_INVALID_VALUE);
	    return;
	}
	normalized = GL_TRUE;
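	/* FALLTHROUGH */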

    case GL_SHORT:
    case GL_FLOAT:
    case GL_DOUBLE:
	__indirect_glVertexAttribPointerARB(index, size, type,
					    normalized,
					    stride, pointer);
	return;
    default:
	__glXSetError(gc, GL_INVALID_ENUM);
	return;
    }
}
Example No. 10
/**
 * Emit DrawArrays protocol.  This function acts as a switch between
 * \c emit_Render_DrawArrays and \c emit_RenderLarge_DrawArrays depending
 * on how much array data is to be sent.
 */
static void
emit_DrawArraysEXT(const __GLXattribute * const state,
		   GLint first, GLsizei count, GLenum mode)
{
    struct array_info arrays[32];
    GLsizei num_arrays;
    GLsizei total_vertex_size;
    __GLXcontext *gc = __glXGetCurrentContext();
    GLuint cmdlen;


    /* Determine how big the final request will be.  This depends on a number
     * of factors.  It depends on how many array elements there are (which is
     * the passed-in 'count'), how many arrays are enabled, how many elements
     * are in each array entry, and what the types are for each array.
     */

    cmdlen = prep_arrays(state, arrays, count, & num_arrays,
			 & total_vertex_size);


    /* If the data payload and the protocol header are too large for a Render
     * command, use a RenderLarge command.
     */
    if (cmdlen > gc->maxSmallRenderCommandSize) {
	emit_RenderLarge_DrawArrays(gc, arrays, first, count, num_arrays,
				    mode, cmdlen, total_vertex_size);
    }
    else {
	emit_Render_DrawArrays(gc, arrays, first, count, num_arrays,
			       mode, cmdlen, total_vertex_size);
    }
}
Example No. 11
void __indirect_glEdgeFlagPointer(GLsizei stride, const GLvoid *pointer)
{
    __GLXcontext *gc = __glXGetCurrentContext();
    __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
    __GLXvertexArrayPointerState *edgeFlagPointer = &state->vertArray.arrays[ edgeFlag_ARRAY ];

    /* Check arguments */
    if (stride < 0) {
	__glXSetError(gc, GL_INVALID_VALUE);
	return;
    } 

    /* Choose appropriate api proc */
    edgeFlagPointer->proc = (void (*)(const void *))__indirect_glEdgeFlagv;

    edgeFlagPointer->stride = stride;
    edgeFlagPointer->ptr = pointer;

    /* Set internal state */
    if (stride == 0) {
	edgeFlagPointer->skip = sizeof(GLboolean);
    } else {
	edgeFlagPointer->skip = stride;
    }

}
Example No. 12
void __indirect_glFogCoordPointerEXT(GLenum type, GLsizei stride, const GLvoid * pointer)
{
    __GLXcontext *gc = __glXGetCurrentContext();
    __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
    __GLXvertexArrayPointerState *fogPointer = &state->vertArray.arrays[ fogCoord_ARRAY ];

    /* Check arguments */
    if (stride < 0) {
	__glXSetError(gc, GL_INVALID_VALUE);
	return;
    } 

    /* Choose appropriate api proc */
    switch(type) {
	__GL_FOG_FUNC(FLOAT, f);
	__GL_FOG_FUNC(DOUBLE, d);
      default:
        __glXSetError(gc, GL_INVALID_ENUM);
        return;
    }

    fogPointer->size = 1;
    fogPointer->type = type;
    fogPointer->stride = stride;
    fogPointer->ptr = pointer;

    /* Set internal state */
    if (stride == 0) {
        fogPointer->skip = __glXTypeSize(type);
    } else {
        fogPointer->skip = stride;
    }
}
Example No. 13
void __indirect_glIndexPointer(GLenum type, GLsizei stride, const GLvoid *pointer)
{
    __GLXcontext *gc = __glXGetCurrentContext();
    __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
    __GLXvertexArrayPointerState *indexPointer = &state->vertArray.arrays[ index_ARRAY ];

    /* Check arguments */
    if (stride < 0) {
        __glXSetError(gc, GL_INVALID_VALUE);
        return;
    }

    /* Choose appropriate api proc */
    switch(type) {
	__GL_INDEX_FUNC(UNSIGNED_BYTE, ub);
        __GL_INDEX_FUNC(SHORT, s);
        __GL_INDEX_FUNC(INT, i);
        __GL_INDEX_FUNC(FLOAT, f);
        __GL_INDEX_FUNC(DOUBLE, d);
      default:
        __glXSetError(gc, GL_INVALID_ENUM);
        return;
    }

    indexPointer->type = type;
    indexPointer->stride = stride;
    indexPointer->ptr = pointer;

    /* Set internal state */
    if (stride == 0) {
	indexPointer->skip = __glXTypeSize(type);
    } else {
	indexPointer->skip = stride;
    }
}
Example No. 14
void
__indirect_glPopClientAttrib(void)
{
   __GLXcontext *gc = __glXGetCurrentContext();
   __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
   __GLXattribute **spp = gc->attributes.stackPointer, *sp;
   GLuint mask;

   if (spp > &gc->attributes.stack[0]) {
      --spp;
      sp = *spp;
      assert(sp != 0);
      mask = sp->mask;
      gc->attributes.stackPointer = spp;

      if (mask & GL_CLIENT_PIXEL_STORE_BIT) {
         state->storePack = sp->storePack;
         state->storeUnpack = sp->storeUnpack;
      }
      if (mask & GL_CLIENT_VERTEX_ARRAY_BIT) {
         __glXPopArrayState(state);
      }

      sp->mask = 0;
   }
   else {
      __glXSetError(gc, GL_STACK_UNDERFLOW);
      return;
   }
}
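A minimal usage sketch for the client-attribute push/pop pair above (standard OpenGL usage; the helper name and parameters are illustrative, not part of the library):

#include <GL/gl.h>

/* Hypothetical caller: save client pixel-store state, change the unpack
 * alignment for a tightly packed upload, then restore the previous state. */
static void upload_tightly_packed(GLsizei w, GLsizei h, const void *pixels)
{
    glPushClientAttrib(GL_CLIENT_PIXEL_STORE_BIT);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0,
                 GL_RGB, GL_UNSIGNED_BYTE, pixels);
    glPopClientAttrib();
}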
Example No. 15
void
__glXSendError(Display * dpy, int_fast8_t errorCode, uint_fast32_t resourceID,
               uint_fast16_t minorCode, bool coreX11error)
{
   struct glx_display *glx_dpy = __glXInitialize(dpy);
   struct glx_context *gc = __glXGetCurrentContext();
   xError error;

   assert(glx_dpy);
   assert(gc);

   LockDisplay(dpy);

   error.type = X_Error;

   if (coreX11error) {
      error.errorCode = errorCode;
   }
   else {
      error.errorCode = glx_dpy->codes->first_error + errorCode;
   }

   error.sequenceNumber = dpy->request;
   error.resourceID = resourceID;
   error.minorCode = minorCode;
   error.majorCode = gc ? gc->majorOpcode : 0;

   _XError(dpy, &error);

   UnlockDisplay(dpy);
}
Example No. 16
/*
 * These are special functions that handle stereoscopic support
 * differences on Mac OS X.
 */
void
__applegl_glDrawBuffer(GLenum mode)
{
    struct glx_context * gc = __glXGetCurrentContext();

    if (gc && apple_glx_context_uses_stereo(gc->driContext)) {
        GLenum buf[2];
        GLsizei n = 0;

        switch (mode) {
        case GL_BACK:
            buf[0] = GL_BACK_LEFT;
            buf[1] = GL_BACK_RIGHT;
            n = 2;
            break;
        case GL_FRONT:
            buf[0] = GL_FRONT_LEFT;
            buf[1] = GL_FRONT_RIGHT;
            n = 2;
            break;

        default:
            buf[0] = mode;
            n = 1;
            break;
        }

        __ogl_framework_api->DrawBuffers(n, buf);
    }
    else {
        __ogl_framework_api->DrawBuffer(mode);
    }
}
Example No. 17
_X_HIDDEN Bool
glXQueryCurrentRendererIntegerMESA(int attribute, unsigned int *value)
{
    struct glx_context *gc = __glXGetCurrentContext();

    if (gc == &dummyContext)
        return False;

    return __glXQueryRendererInteger(gc->psc, attribute, value);
}
Example No. 18
_X_HIDDEN const char *
glXQueryCurrentRendererStringMESA(int attribute)
{
    struct glx_context *gc = __glXGetCurrentContext();

    if (gc == &dummyContext)
        return NULL;

    return __glXQueryRendererString(gc->psc, attribute);
}
Example No. 19
void do_vertex_attrib_enable( GLuint index, GLboolean val )
{
    __GLXcontext *gc = __glXGetCurrentContext();
    __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);

    if ( ! __glXSetArrayEnable( state, GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB,
				index, val ) ) {
	__glXSetError(gc, GL_INVALID_ENUM);
    }
}
Example No. 20
void
__applegl_glViewport(GLint x, GLint y, GLsizei width, GLsizei height)
{
   struct glx_context *gc = __glXGetCurrentContext();
   Display *dpy = glXGetCurrentDisplay();

   if (gc && gc->driContext)
      apple_glx_context_update(dpy, gc->driContext);

   __ogl_framework_api->Viewport(x, y, width, height);
}
Example No. 21
GLboolean
glAreTexturesResidentEXT(GLsizei n, const GLuint * textures,
                         GLboolean * residences)
{
   struct glx_context *const gc = __glXGetCurrentContext();

   if (gc->isDirect) {
      const _glapi_proc *const table = (_glapi_proc *) GET_DISPATCH();
      PFNGLARETEXTURESRESIDENTEXTPROC p =
         (PFNGLARETEXTURESRESIDENTEXTPROC) table[332];

      return p(n, textures, residences);
   }
   else {
      struct glx_context *const gc = __glXGetCurrentContext();
      Display *const dpy = gc->currentDpy;
      GLboolean retval = (GLboolean) 0;
      const GLuint cmdlen = 4 + __GLX_PAD((n * 4));
      if (__builtin_expect((n >= 0) && (dpy != NULL), 1)) {
         GLubyte const *pc =
            __glXSetupVendorRequest(gc, X_GLXVendorPrivateWithReply,
                                    X_GLvop_AreTexturesResidentEXT,
                                    cmdlen);
         (void) memcpy((void *) (pc + 0), (void *) (&n), 4);
         (void) memcpy((void *) (pc + 4), (void *) (textures), (n * 4));
         if (n & 3) {
            /* see comments in __indirect_glAreTexturesResident() */
            GLboolean *res4 = malloc((n + 3) & ~3);
            retval = (GLboolean) __glXReadReply(dpy, 1, res4, GL_TRUE);
            memcpy(residences, res4, n);
            free(res4);
         }
         else {
            retval = (GLboolean) __glXReadReply(dpy, 1, residences, GL_TRUE);
         }
         UnlockDisplay(dpy);
         SyncHandle();
      }
      return retval;
   }
}
Example No. 22
_X_EXPORT GLXContext
glXGetCurrentContext(void)
{
   struct glx_context *cx = __glXGetCurrentContext();

   if (cx == &dummyContext) {
      return NULL;
   }
   else {
      return (GLXContext) cx;
   }
}
Example No. 23
void __indirect_glClientActiveTextureARB(GLenum texture)
{
    __GLXcontext *gc = __glXGetCurrentContext();
    __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
    GLint unit = (GLint) texture - GL_TEXTURE0;

    if (unit < 0 || __GLX_MAX_TEXTURE_UNITS <= unit) {
	__glXSetError(gc, GL_INVALID_ENUM);
	return;
    }
    state->vertArray.activeTexture = unit;
}
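A minimal caller-side sketch for the function above (assumes the ARB_multitexture entry points declared by the GL headers; the helper name and data are illustrative only):

#include <GL/gl.h>

/* Hypothetical caller: select texture unit 1 for client state, attach a
 * texcoord array to it, then restore unit 0 as the active client unit.
 * This is the multi-unit case handled by the mtex_proc path in Example No. 1. */
static void enable_unit1_texcoords(const GLfloat *coords)
{
    glClientActiveTextureARB(GL_TEXTURE1);
    glTexCoordPointer(2, GL_FLOAT, 0, coords);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glClientActiveTextureARB(GL_TEXTURE0);
}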
Example No. 24
PUBLIC GLXContext
glXGetCurrentContext(void)
{
   GLXContext cx = __glXGetCurrentContext();

   if (cx == &dummyContext) {
      return NULL;
   }
   else {
      return cx;
   }
}
Example No. 25
void __indirect_glDrawRangeElements(GLenum mode, GLuint start, GLuint end,
			 GLsizei count, GLenum type,
			 const GLvoid *indices)
{
    __GLXcontext *gc = __glXGetCurrentContext();

    if (end < start) {
	__glXSetError(gc, GL_INVALID_VALUE);
	return;
    }

    __indirect_glDrawElements(mode,count,type,indices);
}
Example No. 26
static int
__glXCloseDisplay(Display * dpy, XExtCodes * codes)
{
   GLXContext gc;

   gc = __glXGetCurrentContext();
   if (dpy == gc->currentDpy) {
      __glXSetCurrentContextNull();
      __glXFreeContext(gc);
   }

   return XextRemoveDisplay(__glXExtensionInfo, dpy);
}
Example No. 27
GLboolean
__indirect_glAreTexturesResident(GLsizei n, const GLuint * textures,
                                 GLboolean * residences)
{
   struct glx_context *const gc = __glXGetCurrentContext();
   Display *const dpy = gc->currentDpy;
   GLboolean retval = (GLboolean) 0;
   if (__builtin_expect((n >= 0) && (dpy != NULL), 1)) {
#ifdef USE_XCB
      xcb_connection_t *c = XGetXCBConnection(dpy);
      (void) __glXFlushRenderBuffer(gc, gc->pc);
      xcb_glx_are_textures_resident_reply_t *reply =
         xcb_glx_are_textures_resident_reply(c,
                                             xcb_glx_are_textures_resident
                                             (c, gc->currentContextTag, n,
                                              textures), NULL);
      (void) memcpy(residences, xcb_glx_are_textures_resident_data(reply),
                    xcb_glx_are_textures_resident_data_length(reply) *
                    sizeof(GLboolean));
      retval = reply->ret_val;
      free(reply);
#else
      const GLuint cmdlen = 4 + __GLX_PAD((n * 4));
      GLubyte const *pc =
         __glXSetupSingleRequest(gc, X_GLsop_AreTexturesResident, cmdlen);
      (void) memcpy((void *) (pc + 0), (void *) (&n), 4);
      (void) memcpy((void *) (pc + 4), (void *) (textures), (n * 4));
      if (n & 3) {
         /* n is not a multiple of four.
          * When reply_is_always_array is TRUE, __glXReadReply() will
          * put a multiple of four bytes into the dest buffer.  If the
          * caller's buffer is not a multiple of four in size, we'll write
          * out of bounds.  So use a temporary buffer that's a few bytes
          * larger.
          */
         GLboolean *res4 = malloc((n + 3) & ~3);
         retval = (GLboolean) __glXReadReply(dpy, 1, res4, GL_TRUE);
         memcpy(residences, res4, n);
         free(res4);
      }
      else {
         retval = (GLboolean) __glXReadReply(dpy, 1, residences, GL_TRUE);
      }
      UnlockDisplay(dpy);
      SyncHandle();
#endif /* USE_XCB */
   }
   return retval;
}
Example No. 28
static void
do_enable_disable(GLenum array, GLboolean val)
{
   __GLXcontext *gc = __glXGetCurrentContext();
   __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
   unsigned index = 0;

   if (array == GL_TEXTURE_COORD_ARRAY) {
      index = __glXGetActiveTextureUnit(state);
   }

   if (!__glXSetArrayEnable(state, array, index, val)) {
      __glXSetError(gc, GL_INVALID_ENUM);
   }
}
Example No. 29
void __indirect_glGetVertexAttribPointervNV( GLuint index, GLenum pname,
					     GLvoid ** pointer )
{
    __GLXcontext * const gc = __glXGetCurrentContext();
    __GLXattribute * state = (__GLXattribute *)(gc->client_state_private);
 
    if ( pname != GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB ) {
	__glXSetError( gc, GL_INVALID_ENUM );
	return;
    }
    
    if ( ! __glXGetArrayPointer( state, GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB,
				 index, pointer ) ) {
	__glXSetError( gc, GL_INVALID_VALUE );
    }
}
Example No. 30
void __indirect_glClientActiveTextureARB(GLenum texture)
{
    __GLXcontext * const gc = __glXGetCurrentContext();
    __GLXattribute * const state = (__GLXattribute *)(gc->client_state_private);
    struct array_state_vector * const arrays = state->array_state;
    const GLint unit = (GLint) texture - GL_TEXTURE0;


    if ( (unit < 0) || (unit >= arrays->num_texture_units) ) {
	__glXSetError(gc, GL_INVALID_ENUM);
	return;
    }

    arrays->active_texture_unit = unit;
}