/**
 * Emit a single element using non-DrawArrays protocol.
 *
 * Walks every enabled array in \c arrays and appends, for each, its
 * immediate-mode command header followed by the element's data to the
 * protocol buffer.
 *
 * \param dst     Protocol destination buffer.
 * \param arrays  Vertex array state vector for the current context.
 * \param index   Index of the element to emit.
 * \returns Pointer to the first byte in \c dst after the emitted data.
 */
GLubyte *
emit_element_none( GLubyte * dst,
                   const struct array_state_vector * arrays,
                   unsigned index )
{
    unsigned i;

    for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
        if ( arrays->arrays[i].enabled ) {
            /* Byte offset of this element within the client's array. */
            const size_t offset = index * arrays->arrays[i].true_stride;

            /* The generic attributes can have more data than is in the
             * elements. This is because a vertex array can be a 2 element,
             * normalized, unsigned short, but the "closest" immediate mode
             * protocol is for a 4Nus. Since the sizes are small, the
             * performance impact on modern processors should be negligible.
             */
            /* NOTE(review): this zeroes the full command length taken from
             * the first 16-bit word of the pre-built header (presumably the
             * GLX render command size field — confirm against the header
             * builder) so that any attribute slots beyond element_size are
             * transmitted as zeros.
             */
            (void) memset( dst, 0, ((uint16_t *)arrays->arrays[i].header)[0] );

            (void) memcpy( dst, arrays->arrays[i].header,
                           arrays->arrays[i].header_size );

            dst += arrays->arrays[i].header_size;

            (void) memcpy( dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                           arrays->arrays[i].element_size );

            /* Protocol data is always padded to a 4-byte boundary. */
            dst += __GLX_PAD( arrays->arrays[i].element_size );
        }
    }

    return dst;
}
/**
 * Byte-swapped render path for SeparableFilter2D.
 *
 * Swaps every 32-bit field of the convolution-filter command header in
 * place so the non-swapped handler can consume it.  The pixel data itself
 * is not swapped here; instead the header's swapBytes flag is inverted and
 * the GL's pixel unpacking performs any needed swap.
 *
 * Fix: removed the unused locals `hdrlen` and `image1len` (hdrlen was
 * computed but never read, image1len was never even assigned) — dead code
 * that only produced unused-variable warnings.
 *
 * \param pc  Pointer to the command header (client byte order).
 */
void __glXDispSwap_SeparableFilter2D(GLbyte *pc)
{
    __GLXdispatchConvolutionFilterHeader *hdr =
        (__GLXdispatchConvolutionFilterHeader *) pc;
    __GLX_DECLARE_SWAP_VARIABLES;

    /* Swap each header field in place. */
    __GLX_SWAP_INT((GLbyte *)&hdr->rowLength);
    __GLX_SWAP_INT((GLbyte *)&hdr->skipRows);
    __GLX_SWAP_INT((GLbyte *)&hdr->skipPixels);
    __GLX_SWAP_INT((GLbyte *)&hdr->alignment);
    __GLX_SWAP_INT((GLbyte *)&hdr->target);
    __GLX_SWAP_INT((GLbyte *)&hdr->internalformat);
    __GLX_SWAP_INT((GLbyte *)&hdr->width);
    __GLX_SWAP_INT((GLbyte *)&hdr->height);
    __GLX_SWAP_INT((GLbyte *)&hdr->format);
    __GLX_SWAP_INT((GLbyte *)&hdr->type);

    /*
    ** Just invert swapBytes flag; the GL will figure out if it needs to swap
    ** the pixel data.
    */
    hdr->swapBytes = !hdr->swapBytes;
}
/**
 * Byte-swapped dispatch for SeparableFilter2D.
 *
 * Swaps the command header fields in place, programs the GL unpack pixel
 * store state from the (now host-order) header, then dispatches the
 * SeparableFilter2D call with pointers to the row and column filter images
 * that follow the header in the request.
 *
 * \param pc  Pointer to the command header (client byte order).
 */
void __glXDispSwap_SeparableFilter2D(GLbyte *pc)
{
    __GLXdispatchConvolutionFilterHeader *hdr =
        (__GLXdispatchConvolutionFilterHeader *) pc;
    GLint hdrlen, image1len;
    __GLX_DECLARE_SWAP_VARIABLES;

    /* Padded size of the header; the row-filter image starts here. */
    hdrlen = __GLX_PAD(__GLX_CONV_FILT_CMD_HDR_SIZE);

    /* Swap each 32-bit header field in place. */
    __GLX_SWAP_INT((GLbyte *)&hdr->rowLength);
    __GLX_SWAP_INT((GLbyte *)&hdr->skipRows);
    __GLX_SWAP_INT((GLbyte *)&hdr->skipPixels);
    __GLX_SWAP_INT((GLbyte *)&hdr->alignment);
    __GLX_SWAP_INT((GLbyte *)&hdr->target);
    __GLX_SWAP_INT((GLbyte *)&hdr->internalformat);
    __GLX_SWAP_INT((GLbyte *)&hdr->width);
    __GLX_SWAP_INT((GLbyte *)&hdr->height);
    __GLX_SWAP_INT((GLbyte *)&hdr->format);
    __GLX_SWAP_INT((GLbyte *)&hdr->type);

    /*
    ** Just invert swapBytes flag; the GL will figure out if it needs to swap
    ** the pixel data.
    */
    CALL_PixelStorei( GET_DISPATCH(), (GL_UNPACK_SWAP_BYTES, !hdr->swapBytes) );
    CALL_PixelStorei( GET_DISPATCH(), (GL_UNPACK_LSB_FIRST, hdr->lsbFirst) );
    CALL_PixelStorei( GET_DISPATCH(), (GL_UNPACK_ROW_LENGTH, hdr->rowLength) );
    CALL_PixelStorei( GET_DISPATCH(), (GL_UNPACK_SKIP_ROWS, hdr->skipRows) );
    CALL_PixelStorei( GET_DISPATCH(), (GL_UNPACK_SKIP_PIXELS, hdr->skipPixels) );
    CALL_PixelStorei( GET_DISPATCH(), (GL_UNPACK_ALIGNMENT, hdr->alignment) );

    /* XXX check this usage - internal code called
    ** a version without the packing parameters
    */
    /* Size of the row filter (a width x 1 image); the column filter starts
     * at the next 4-byte boundary after it.
     */
    image1len = __glXImageSize(hdr->format, hdr->type, 0, hdr->width, 1, 1,
                               0, hdr->rowLength, 0, hdr->skipRows,
                               hdr->alignment);
    image1len = __GLX_PAD(image1len);

    CALL_SeparableFilter2D( GET_DISPATCH(),
                            (hdr->target, hdr->internalformat, hdr->width,
                             hdr->height, hdr->format, hdr->type,
                             ((GLubyte *)hdr+hdrlen),
                             ((GLubyte *)hdr+hdrlen+image1len)) );
}
/**
 * Internal function used for \c glCompressedTexImage1D and
 * \c glCompressedTexImage2D.
 *
 * Emits either a small render command (payload inline) or a RenderLarge
 * command (payload streamed via __glXSendLargeCommand) depending on the
 * total command length.  For proxy targets no image data accompanies the
 * request, so the payload size is forced to zero.
 *
 * \param target           Texture target (may be a proxy target).
 * \param level            Mipmap level.
 * \param internal_format  Compressed internal format.
 * \param width, height    Image dimensions (height unused by the 1D entry).
 * \param border           Border width.
 * \param image_size       Size in bytes of the compressed image data.
 * \param data             Compressed image data.
 * \param rop              Render opcode to emit (1D or 2D variant).
 */
static void
CompressedTexImage1D2D( GLenum target, GLint level,
                        GLenum internal_format,
                        GLsizei width, GLsizei height,
                        GLint border, GLsizei image_size,
                        const GLvoid *data, CARD32 rop )
{
    __GLX_DECLARE_VARIABLES();

    __GLX_LOAD_VARIABLES();
    if ( gc->currentDpy == NULL ) {
        return;
    }

    /* Proxy texture requests carry no image data. */
    if ( (target == GL_PROXY_TEXTURE_1D)
         || (target == GL_PROXY_TEXTURE_2D)
         || (target == GL_PROXY_TEXTURE_CUBE_MAP) ) {
        compsize = 0;
    }
    else {
        compsize = image_size;
    }

    cmdlen = __GLX_PAD( __GLX_COMPRESSED_TEXIMAGE_CMD_HDR_SIZE
                        + compsize );
    if ( cmdlen <= gc->maxSmallRenderCommandSize ) {
        /* Small command: header fields at fixed byte offsets, then the
         * inline payload.
         */
        __GLX_BEGIN_VARIABLE( rop, cmdlen );
        __GLX_PUT_LONG( 4, target );
        __GLX_PUT_LONG( 8, level );
        __GLX_PUT_LONG( 12, internal_format );
        __GLX_PUT_LONG( 16, width );
        __GLX_PUT_LONG( 20, height );
        __GLX_PUT_LONG( 24, border );
        __GLX_PUT_LONG( 28, image_size );

        if ( compsize != 0 ) {
            __GLX_PUT_CHAR_ARRAY( __GLX_COMPRESSED_TEXIMAGE_CMD_HDR_SIZE,
                                  data, image_size );
        }

        __GLX_END( cmdlen );
    }
    else {
        /* RenderLarge: every offset shifts by 4 for the larger header. */
        assert( compsize != 0 );

        __GLX_BEGIN_VARIABLE_LARGE( rop, cmdlen + 4 );
        __GLX_PUT_LONG( 8, target );
        __GLX_PUT_LONG( 12, level );
        __GLX_PUT_LONG( 16, internal_format );
        __GLX_PUT_LONG( 20, width );
        __GLX_PUT_LONG( 24, height );
        __GLX_PUT_LONG( 28, border );
        __GLX_PUT_LONG( 32, image_size );
        __glXSendLargeCommand( gc, gc->pc,
                               __GLX_COMPRESSED_TEXIMAGE_CMD_HDR_SIZE + 4,
                               data, image_size );
    }
}
/**
 * Compute the payload size of a ProgramStringARB render request.
 *
 * \param pc    Pointer to the request body; the string length is encoded
 *              at byte offset 8.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes, or -1 if the encoded length
 *          is malformed (negative, or so large that padding would
 *          overflow) so the dispatcher rejects the request.
 */
int __glXProgramStringARBReqSize( const GLbyte * pc, Bool swap )
{
    GLsizei len = *(GLsizei *)(pc + 8);

    if (swap) {
        len = bswap_32(len);
    }

    /* len is attacker-controlled: reject negative values and values close
     * enough to INT32_MAX that __GLX_PAD's "+3" rounding would overflow.
     * 0x7ffffffc == INT32_MAX - 3 (spelled out to avoid <limits.h>).
     */
    if (len < 0 || len > 0x7ffffffc) {
        return -1;
    }

    return __GLX_PAD(len);
}
/**
 * Compute the payload size of a VertexAttribs4dvNV render request.
 *
 * \param pc    Pointer to the request body; the attrib count is encoded
 *              at byte offset 4.  Each attrib is 4 doubles = 32 bytes.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes, or -1 if the encoded count
 *          is negative or would overflow the 32-bit size computation
 *          (an overflow here could slip an undersized length past the
 *          dispatch length check).
 */
int __glXVertexAttribs4dvNVReqSize( const GLbyte * pc, Bool swap )
{
    GLsizei n = *(GLsizei *)(pc + 4);

    if (swap) {
        n = bswap_32(n);
    }

    /* 0x7fffffff is INT32_MAX; "- 3" leaves room for __GLX_PAD rounding. */
    if (n < 0 || n > ((0x7fffffff - 3) / 32)) {
        return -1;
    }

    return __GLX_PAD((n * 32));
}
/**
 * Compute the payload size of a ProgramNamedParameter4fvNV render request.
 *
 * \param pc    Pointer to the request body; the name length is encoded at
 *              byte offset 4.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes, or -1 if the encoded length
 *          is malformed (negative, or large enough to overflow padding).
 */
int __glXProgramNamedParameter4fvNVReqSize( const GLbyte * pc, Bool swap )
{
    GLsizei len = *(GLsizei *)(pc + 4);

    if (swap) {
        len = bswap_32(len);
    }

    /* Reject negative lengths and lengths within __GLX_PAD's "+3" of
     * INT32_MAX (0x7ffffffc == INT32_MAX - 3).
     */
    if (len < 0 || len > 0x7ffffffc) {
        return -1;
    }

    return __GLX_PAD(len);
}
/**
 * Compute the payload size of a CompressedTexSubImage3D render request.
 *
 * \param pc    Pointer to the request body; the compressed image size is
 *              encoded at byte offset 36.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes, or -1 if the encoded size
 *          is malformed (negative, or large enough to overflow padding).
 */
int __glXCompressedTexSubImage3DReqSize( const GLbyte * pc, Bool swap )
{
    GLsizei imageSize = *(GLsizei *)(pc + 36);

    if (swap) {
        imageSize = bswap_32(imageSize);
    }

    /* imageSize is attacker-controlled; reject negatives and values within
     * __GLX_PAD's "+3" of INT32_MAX (0x7ffffffc == INT32_MAX - 3).
     */
    if (imageSize < 0 || imageSize > 0x7ffffffc) {
        return -1;
    }

    return __GLX_PAD(imageSize);
}
/**
 * Compute the payload size of a PrioritizeTextures render request.
 *
 * The payload holds n texture names (4 bytes each) followed by n
 * priorities (4 bytes each), 8 bytes per texture in total.
 *
 * \param pc    Pointer to the request body; the texture count is encoded
 *              at byte offset 0.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes, or -1 if the encoded count
 *          is negative or would overflow the 32-bit size computation.
 */
int __glXPrioritizeTexturesReqSize( const GLbyte * pc, Bool swap )
{
    GLsizei n = *(GLsizei *)(pc + 0);

    if (swap) {
        n = bswap_32(n);
    }

    /* Total is 8 bytes per texture; 0x7fffffff is INT32_MAX and "- 3"
     * leaves room for __GLX_PAD rounding.
     */
    if (n < 0 || n > ((0x7fffffff - 3) / 8)) {
        return -1;
    }

    return __GLX_PAD((n * 4) + (n * 4));
}
/**
 * Compute the payload size of a PixelMapusv render request.
 *
 * \param pc    Pointer to the request body; the map size (count of 2-byte
 *              unsigned shorts) is encoded at byte offset 4.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes, or -1 if the encoded count
 *          is negative or would overflow the 32-bit size computation.
 */
int __glXPixelMapusvReqSize( const GLbyte * pc, Bool swap )
{
    GLsizei mapsize = *(GLsizei *)(pc + 4);

    if (swap) {
        mapsize = bswap_32(mapsize);
    }

    /* Each entry is 2 bytes; 0x7fffffff is INT32_MAX and "- 3" leaves
     * room for __GLX_PAD rounding.
     */
    if (mapsize < 0 || mapsize > ((0x7fffffff - 3) / 2)) {
        return -1;
    }

    return __GLX_PAD((mapsize * 2));
}
/**
 * Compute the payload size of a ProgramParameters4fvNV render request.
 *
 * \param pc    Pointer to the request body; the parameter count is encoded
 *              at byte offset 8.  Each parameter is 4 floats = 16 bytes.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes, or -1 if the encoded count
 *          is negative or would overflow the 32-bit size computation.
 */
int __glXProgramParameters4fvNVReqSize( const GLbyte * pc, Bool swap )
{
    GLsizei num = *(GLsizei *)(pc + 8);

    if (swap) {
        num = bswap_32(num);
    }

    /* 0x7fffffff is INT32_MAX; "- 3" leaves room for __GLX_PAD rounding. */
    if (num < 0 || num > ((0x7fffffff - 3) / 16)) {
        return -1;
    }

    return __GLX_PAD((num * 16));
}
/**
 * Compute the payload size of a DrawBuffers render request.
 *
 * \param pc    Pointer to the request body; the buffer count is encoded
 *              at byte offset 0.  Each buffer enum is 4 bytes.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes, or -1 if the encoded count
 *          is negative or would overflow the 32-bit size computation.
 */
int __glXDrawBuffersReqSize( const GLbyte * pc, Bool swap )
{
    GLsizei n = *(GLsizei *)(pc + 0);

    if (swap) {
        n = bswap_32(n);
    }

    /* 0x7fffffff is INT32_MAX; "- 3" leaves room for __GLX_PAD rounding. */
    if (n < 0 || n > ((0x7fffffff - 3) / 4)) {
        return -1;
    }

    return __GLX_PAD((n * 4));
}
/**
 * Validate and record the client information from a GLXSetClientInfoARB
 * request.
 *
 * Checks that the request length matches the sizes encoded in the request
 * fields, that both extension strings are NUL-terminated within their
 * declared (padded) lengths, and then stores a copy of the client's GL
 * extension string on the client state.
 *
 * \param req               The SetClientInfoARB request.
 * \param bytes_per_version Size of one version entry in the version list
 *                          (differs between the ARB and EXT forms).
 * \returns 0 on success, or BadLength if any size check fails.
 *
 * NOTE(review): expected_size is computed with unhardened arithmetic —
 * a huge numVersions / num*ExtensionBytes could overflow the unsigned
 * sum and collide with req->length; confirm the callers bound these
 * fields, or add explicit overflow checks.
 */
static int
set_client_info(__GLXclientState * cl, xGLXSetClientInfoARBReq * req,
                unsigned bytes_per_version)
{
    char *gl_extensions;
    char *glx_extensions;

    /* Verify that the size of the packet matches the size inferred from the
     * sizes specified for the various fields.
     */
    const unsigned expected_size = sz_xGLXSetClientInfoARBReq
        + (req->numVersions * bytes_per_version)
        + __GLX_PAD(req->numGLExtensionBytes)
        + __GLX_PAD(req->numGLXExtensionBytes);

    if (req->length != (expected_size / 4))
        return BadLength;

    /* Verify that the actual length of the GL extension string matches what's
     * encoded in protocol packet.
     */
    gl_extensions = (char *) (req + 1)
        + (req->numVersions * bytes_per_version);
    if (req->numGLExtensionBytes != 0
        && memchr(gl_extensions, 0,
                  __GLX_PAD(req->numGLExtensionBytes)) == NULL)
        return BadLength;

    /* Verify that the actual length of the GLX extension string matches
     * what's encoded in protocol packet.
     */
    glx_extensions = gl_extensions + __GLX_PAD(req->numGLExtensionBytes);
    if (req->numGLXExtensionBytes != 0
        && memchr(glx_extensions, 0,
                  __GLX_PAD(req->numGLXExtensionBytes)) == NULL)
        return BadLength;

    /* Only the GL extension string is retained; the GLX extension string
     * is validated above but not stored here.
     */
    free(cl->GLClientextensions);
    cl->GLClientextensions = strdup(gl_extensions);

    return 0;
}
/**
 * Client-side emitter for glCompressedTexSubImage3D.
 *
 * Emits either a small render command with the compressed payload inline,
 * or a RenderLarge command streamed via __glXSendLargeCommand, depending
 * on the total command length.
 */
void
__indirect_glCompressedTexSubImage3DARB( GLenum target, GLint level,
                                         GLint xoffset, GLint yoffset,
                                         GLint zoffset,
                                         GLsizei width, GLsizei height,
                                         GLsizei depth,
                                         GLenum format, GLsizei image_size,
                                         const GLvoid *data )
{
    __GLX_DECLARE_VARIABLES();

    __GLX_LOAD_VARIABLES();
    if ( gc->currentDpy == NULL ) {
        return;
    }

    cmdlen = __GLX_PAD( __GLX_COMPRESSED_TEXSUBIMAGE_3D_CMD_HDR_SIZE
                        + image_size );
    if ( cmdlen <= gc->maxSmallRenderCommandSize ) {
        /* Small command: header fields at fixed byte offsets, then the
         * inline payload.
         */
        __GLX_BEGIN_VARIABLE( X_GLrop_CompressedTexSubImage3D, cmdlen );
        __GLX_PUT_LONG( 4, target );
        __GLX_PUT_LONG( 8, level );
        __GLX_PUT_LONG( 12, xoffset );
        __GLX_PUT_LONG( 16, yoffset );
        __GLX_PUT_LONG( 20, zoffset );
        __GLX_PUT_LONG( 24, width );
        __GLX_PUT_LONG( 28, height );
        __GLX_PUT_LONG( 32, depth );
        __GLX_PUT_LONG( 36, format );
        __GLX_PUT_LONG( 40, image_size );

        if ( image_size != 0 ) {
            __GLX_PUT_CHAR_ARRAY( __GLX_COMPRESSED_TEXSUBIMAGE_3D_CMD_HDR_SIZE,
                                  data, image_size );
        }

        __GLX_END( cmdlen );
    }
    else {
        /* RenderLarge: every offset shifts by 4 for the larger header. */
        __GLX_BEGIN_VARIABLE_LARGE( X_GLrop_CompressedTexSubImage3D,
                                    cmdlen + 4 );
        __GLX_PUT_LONG( 8, target );
        __GLX_PUT_LONG( 12, level );
        __GLX_PUT_LONG( 16, xoffset );
        __GLX_PUT_LONG( 20, yoffset );
        __GLX_PUT_LONG( 24, zoffset );
        __GLX_PUT_LONG( 28, width );
        __GLX_PUT_LONG( 32, height );
        __GLX_PUT_LONG( 36, depth );
        __GLX_PUT_LONG( 40, format );
        __GLX_PUT_LONG( 44, image_size );
        __glXSendLargeCommand( gc, gc->pc,
                               __GLX_COMPRESSED_TEXSUBIMAGE_3D_CMD_HDR_SIZE + 4,
                               data, image_size );
    }
}
/**
 * Compute the payload size of a TexGendv render request.
 *
 * \param pc    Pointer to the request body; pname is encoded at byte
 *              offset 4.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes (compsize doubles), or -1 if
 *          pname is not recognized so the dispatcher rejects the request
 *          instead of padding a negative size.
 */
int __glXTexGendvReqSize( const GLbyte * pc, Bool swap )
{
    GLenum pname = * (GLenum *)(pc + 4);
    GLsizei compsize;

    if (swap) {
        pname = bswap_32(pname);
    }

    /* __glTexGendv_size() returns a negative value for an invalid pname
     * (see the "if (compsize < 0)" handling in __glXDisp_TexGendv);
     * propagate that as an error instead of feeding it to __GLX_PAD.
     */
    compsize = __glTexGendv_size(pname);
    if (compsize < 0) {
        return -1;
    }

    return __GLX_PAD((compsize * 8));
}
/**
 * Compute the payload size of a ConvolutionParameterfv render request.
 *
 * \param pc    Pointer to the request body; pname is encoded at byte
 *              offset 4.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes (compsize floats), or -1 if
 *          pname is not recognized.
 */
int __glXConvolutionParameterfvReqSize( const GLbyte * pc, Bool swap )
{
    GLenum pname = * (GLenum *)(pc + 4);
    GLsizei compsize;

    if (swap) {
        pname = bswap_32(pname);
    }

    /* A negative size means an invalid pname; reject instead of padding
     * a negative value.
     */
    compsize = __glConvolutionParameterfv_size(pname);
    if (compsize < 0) {
        return -1;
    }

    return __GLX_PAD((compsize * 4));
}
/**
 * Compute the payload size of a Fogfv render request.
 *
 * \param pc    Pointer to the request body; pname is encoded at byte
 *              offset 0.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes (compsize floats), or -1 if
 *          pname is not recognized.
 */
int __glXFogfvReqSize( const GLbyte * pc, Bool swap )
{
    GLenum pname = * (GLenum *)(pc + 0);
    GLsizei compsize;

    if (swap) {
        pname = bswap_32(pname);
    }

    /* A negative size means an invalid pname; reject instead of padding
     * a negative value.
     */
    compsize = __glFogfv_size(pname);
    if (compsize < 0) {
        return -1;
    }

    return __GLX_PAD((compsize * 4));
}
/**
 * Indirect-rendering glGetTexImage.
 *
 * Sends a GetTexImage single request, reads the reply, fetches the pixel
 * data into a temporary buffer, and unpacks it into the caller's buffer
 * honoring the client's pixel-store pack modes.
 */
void glGetTexImage(GLenum target, GLint level, GLenum format, GLenum type,
                   GLvoid *texels)
{
    __GLX_SINGLE_DECLARE_VARIABLES();
    const __GLXattribute * state;
    xGLXGetTexImageReply reply;
    GLubyte *buf;

    if (!dpy) return;
    __GLX_SINGLE_LOAD_VARIABLES();
    state = gc->client_state_private;

    /* Send request */
    __GLX_SINGLE_BEGIN(X_GLsop_GetTexImage,__GLX_PAD(17));
    __GLX_SINGLE_PUT_LONG(0,target);
    __GLX_SINGLE_PUT_LONG(4,level);
    __GLX_SINGLE_PUT_LONG(8,format);
    __GLX_SINGLE_PUT_LONG(12,type);
    /* Tell the server whether replies should arrive byte-swapped. */
    __GLX_SINGLE_PUT_CHAR(16,state->storePack.swapEndian);
    __GLX_SINGLE_READ_XREPLY();

    /* Reply length is in 4-byte units. */
    compsize = reply.length << 2;

    if (compsize != 0) {
        /* Allocate a holding buffer to transform the data from */
        buf = (GLubyte*) Xmalloc(compsize);
        if (!buf) {
            /* Throw data away */
            _XEatData(dpy, compsize);
            __glXSetError(gc, GL_OUT_OF_MEMORY);
        } else {
            GLint width, height, depth;

            /*
            ** Fetch data into holding buffer.  Apply pixel store pack modes
            ** to put data back into client memory
            */
            width = reply.width;
            height = reply.height;
            depth = reply.depth;
            __GLX_SINGLE_GET_CHAR_ARRAY(buf,compsize);
            /* NOTE(review): dimension argument is 2 even though a depth is
             * passed — confirm against __glEmptyImage's contract.
             */
            __glEmptyImage(gc, 2, width, height, depth, format, type, buf,
                           texels);
            Xfree((char*) buf);
        }
    } else {
        /*
        ** GL error occurred, don't modify user's buffer.
        */
    }
    __GLX_SINGLE_END();
}
/**
 * Initialize a \c array_info structure for each array that is enabled in
 * \c state.  Determine how many arrays are enabled, and store the result
 * in \c num_arrays.  Determine how big each vertex is, and store the result
 * in \c total_vertex_size.
 *
 * \returns The size of the final request.  This is the size, in bytes, of
 * the DrawArrays header, the ARRAY_INFO structures, and all the vertex data.
 * This value \b assumes a \c X_GLXRender command is used.  The true size
 * will be 4 bytes larger if a \c X_GLXRenderLarge command is used.
 */
static GLuint
prep_arrays(const __GLXattribute * const state, struct array_info * arrays,
            GLint count, GLsizei *num_arrays, GLsizei *total_vertex_size)
{
    GLsizei na = 0;   /* number of enabled arrays found so far */
    GLsizei vs = 0;   /* accumulated (padded) size of one vertex, in bytes */

/* Fill in the next array_info slot from one vertex-array state record and
 * accumulate its padded per-vertex size.
 */
#define ASSIGN_ARRAY_INFO(state, enum_name, arr) \
    do { \
        arrays[ na ].ai.datatype = state->vertArray. arr .type ; \
        arrays[ na ].ai.numVals = state->vertArray. arr .size ; \
        arrays[ na ].ai.component = GL_ ## enum_name ## _ARRAY; \
        \
        arrays[ na ].bytes = state->vertArray. arr .size \
            * __glXTypeSize( state->vertArray. arr .type ); \
        arrays[ na ].ptr = state->vertArray. arr .ptr; \
        arrays[ na ].skip = state->vertArray. arr .skip; \
        \
        vs += __GLX_PAD(arrays[ na ].bytes); \
        na++; \
    } while( 0 )

/* Conditionally add one array; the third argument names both the state
 * record (via arrays[ arr ## _ARRAY ]) and the enable flag.
 */
#define ADD_ARRAY_IF_ENABLED(state, enum_name, arr) \
    do { if ( IS_ARRAY_ENABLED(state, arr) ) { \
        ASSIGN_ARRAY_INFO(state, enum_name, arrays[ arr ## _ARRAY ] ); \
    } } while( 0 )

    ADD_ARRAY_IF_ENABLED(state, VERTEX, vertex);
    ADD_ARRAY_IF_ENABLED(state, NORMAL, normal);
    ADD_ARRAY_IF_ENABLED(state, COLOR, color);
    ADD_ARRAY_IF_ENABLED(state, SECONDARY_COLOR, secondaryColor);
    ADD_ARRAY_IF_ENABLED(state, FOG_COORD, fogCoord);
    ADD_ARRAY_IF_ENABLED(state, EDGE_FLAG, edgeFlag);
    ADD_ARRAY_IF_ENABLED(state, INDEX, index);

    /* The standard DrawArrays protocol *only* supports a single array of
     * texture coordinates.
     */
    if ( IS_TEXARRAY_ENABLED(state, 0) ) {
        ASSIGN_ARRAY_INFO(state, TEXTURE_COORD, texCoord[0]);
    }

    *num_arrays = na;
    *total_vertex_size = vs;

    /* Header + one ARRAY_INFO per enabled array + all vertex data. */
    return __GLX_PAD((__GLX_COMPONENT_HDR_SIZE * na) + (vs * count)
                     + __GLX_DRAWARRAYS_CMD_HDR_SIZE);
}
/**
 * Indirect-rendering glAreTexturesResident.
 *
 * Sends an AreTexturesResident single request with the n texture names
 * and fills \c residences with the per-texture results from the reply.
 *
 * \returns GL_TRUE if all textures are resident, otherwise the server's
 *          return value; GL_FALSE if the request could not be sent.
 */
GLboolean
__indirect_glAreTexturesResident(GLsizei n, const GLuint * textures,
                                 GLboolean * residences)
{
    struct glx_context *const gc = __glXGetCurrentContext();
    Display *const dpy = gc->currentDpy;
    GLboolean retval = (GLboolean) 0;
    if (__builtin_expect((n >= 0) && (dpy != NULL), 1)) {
#ifdef USE_XCB
        xcb_connection_t *c = XGetXCBConnection(dpy);
        (void) __glXFlushRenderBuffer(gc, gc->pc);
        /* NOTE(review): reply is NULL on an X error or broken connection;
         * the memcpy/deref below would crash — consider a NULL check.
         */
        xcb_glx_are_textures_resident_reply_t *reply =
            xcb_glx_are_textures_resident_reply(c,
                                                xcb_glx_are_textures_resident
                                                (c, gc->currentContextTag, n,
                                                 textures), NULL);
        (void) memcpy(residences, xcb_glx_are_textures_resident_data(reply),
                      xcb_glx_are_textures_resident_data_length(reply) *
                      sizeof(GLboolean));
        retval = reply->ret_val;
        free(reply);
#else
        const GLuint cmdlen = 4 + __GLX_PAD((n * 4));
        GLubyte const *pc =
            __glXSetupSingleRequest(gc, X_GLsop_AreTexturesResident, cmdlen);
        (void) memcpy((void *) (pc + 0), (void *) (&n), 4);
        (void) memcpy((void *) (pc + 4), (void *) (textures), (n * 4));
        if (n & 3) {
            /* n is not a multiple of four.
             * When reply_is_always_array is TRUE, __glXReadReply() will
             * put a multiple of four bytes into the dest buffer.  If the
             * caller's buffer is not a multiple of four in size, we'll write
             * out of bounds.  So use a temporary buffer that's a few bytes
             * larger.
             */
            /* NOTE(review): malloc result is unchecked; on failure
             * __glXReadReply would write through NULL.  Any fix must still
             * consume the reply to keep the protocol stream in sync.
             */
            GLboolean *res4 = malloc((n + 3) & ~3);
            retval = (GLboolean) __glXReadReply(dpy, 1, res4, GL_TRUE);
            memcpy(residences, res4, n);
            free(res4);
        }
        else {
            retval = (GLboolean) __glXReadReply(dpy, 1, residences, GL_TRUE);
        }
        UnlockDisplay(dpy);
        SyncHandle();
#endif /* USE_XCB */
    }
    return retval;
}
/**
 * Compute the payload size of a CallLists render request.
 *
 * \param pc    Pointer to the request body; the list count is at byte
 *              offset 0 and the element type at offset 4.
 * \param swap  True if the client has the opposite byte order.
 * \returns The padded payload size in bytes (n elements of
 *          __glCallLists_size(type) bytes each); 0 for an unrecognized
 *          type; -1 if the count is negative or the product would
 *          overflow the 32-bit size computation.
 */
int __glXCallListsReqSize( const GLbyte * pc, Bool swap )
{
    GLsizei n = *(GLsizei *)(pc + 0);
    GLenum type = * (GLenum *)(pc + 4);
    GLsizei compsize;

    if (swap) {
        n = bswap_32(n);
        type = bswap_32(type);
    }

    compsize = __glCallLists_size(type);

    /* n is attacker-controlled: reject negative counts, negative element
     * sizes, and products that would overflow (which could slip an
     * undersized length past the dispatch length check).  compsize == 0
     * (unknown type) still yields 0, preserving existing behavior.
     * 0x7fffffff is INT32_MAX; "- 3" leaves room for __GLX_PAD rounding.
     */
    if (n < 0 || compsize < 0
        || (compsize > 0 && n > ((0x7fffffff - 3) / compsize))) {
        return -1;
    }

    return __GLX_PAD((compsize * n));
}
/**
 * Emits the vertex data for the DrawArrays GLX protocol.
 *
 * Copies one element from every enabled array into the protocol buffer,
 * advancing the write position by the 4-byte-padded size of each
 * attribute.
 *
 * \param data        Protocol destination buffer.
 * \param arrays      Per-array source descriptors.
 * \param num_arrays  Number of entries in \c arrays.
 * \param element     Index of the element to copy from each array.
 * \param offset      Byte offset in \c data at which to start writing.
 * \returns The byte offset just past the emitted data.
 */
static GLsizei
emit_vertex(GLubyte * data, const struct array_info * arrays,
            GLsizei num_arrays, GLint element, GLsizei offset)
{
    GLsizei written = offset;
    GLint a = 0;

    while (a < num_arrays) {
        (void) memcpy(data + written,
                      arrays[a].ptr + (arrays[a].skip * element),
                      arrays[a].bytes);
        written += __GLX_PAD(arrays[a].bytes);
        a++;
    }

    return written;
}
/**
 * Indirect-rendering glReadPixels.
 *
 * Sends a ReadPixels single request, reads the reply, fetches the pixel
 * data into a temporary buffer, and unpacks it into the caller's buffer
 * honoring the client's pixel-store pack modes.
 */
void glReadPixels(GLint x, GLint y, GLsizei width, GLsizei height,
                  GLenum format, GLenum type, GLvoid *pixels)
{
    __GLX_SINGLE_DECLARE_VARIABLES();
    const __GLXattribute * state;
    xGLXReadPixelsReply reply;
    GLubyte *buf;

    if (!dpy) return;
    __GLX_SINGLE_LOAD_VARIABLES();
    state = gc->client_state_private;

    /* Send request */
    __GLX_SINGLE_BEGIN(X_GLsop_ReadPixels,__GLX_PAD(26));
    __GLX_SINGLE_PUT_LONG(0,x);
    __GLX_SINGLE_PUT_LONG(4,y);
    __GLX_SINGLE_PUT_LONG(8,width);
    __GLX_SINGLE_PUT_LONG(12,height);
    __GLX_SINGLE_PUT_LONG(16,format);
    __GLX_SINGLE_PUT_LONG(20,type);
    /* Tell the server whether replies should arrive byte-swapped. */
    __GLX_SINGLE_PUT_CHAR(24,state->storePack.swapEndian);
    /* lsb_first flag: always GL_FALSE here. */
    __GLX_SINGLE_PUT_CHAR(25,GL_FALSE);
    __GLX_SINGLE_READ_XREPLY();

    /* Reply length is in 4-byte units. */
    compsize = reply.length << 2;

    if (compsize != 0) {
        /* Allocate a holding buffer to transform the data from */
        buf = (GLubyte*) Xmalloc(compsize);
        if (!buf) {
            /* Throw data away */
            _XEatData(dpy, compsize);
            __glXSetError(gc, GL_OUT_OF_MEMORY);
        } else {
            /*
            ** Fetch data into holding buffer.  Apply pixel store pack modes
            ** to put data back into client memory
            */
            __GLX_SINGLE_GET_CHAR_ARRAY(buf,compsize);
            __glEmptyImage(gc, 2, width, height, 1, format, type, buf,
                           pixels);
            Xfree((char*) buf);
        }
    } else {
        /*
        ** GL error occurred; don't modify user's buffer.
        */
    }
    __GLX_SINGLE_END();
}
/**
 * Indirect-rendering glGetHistogram.
 *
 * Sends a GetHistogram single request, reads the reply, fetches the
 * histogram data into a temporary buffer, and unpacks it into the
 * caller's buffer honoring the client's pixel-store pack modes.
 */
void glGetHistogram(GLenum target, GLboolean reset, GLenum format,
                    GLenum type, GLvoid *values)
{
    __GLX_SINGLE_DECLARE_VARIABLES();
    const __GLXattribute * state;
    xGLXGetHistogramReply reply;
    GLubyte *buf;

    if (!dpy) return;
    __GLX_SINGLE_LOAD_VARIABLES();
    state = gc->client_state_private;

    /* Send request */
    __GLX_SINGLE_BEGIN(X_GLsop_GetHistogram,__GLX_PAD(14));
    __GLX_SINGLE_PUT_LONG(0,(long)target);
    __GLX_SINGLE_PUT_LONG(4,(long)format);
    __GLX_SINGLE_PUT_LONG(8,(long)type);
    /* Tell the server whether replies should arrive byte-swapped. */
    __GLX_SINGLE_PUT_CHAR(12,state->storePack.swapEndian);
    __GLX_SINGLE_PUT_CHAR(13,reset);
    __GLX_SINGLE_READ_XREPLY();

    /* Reply length is in 4-byte units. */
    compsize = (long)reply.length << 2;

    if (compsize != 0) {
        /* Allocate a holding buffer to transform the data from */
        buf = (GLubyte*)Xmalloc(compsize);
        if (!buf) {
            /* Throw data away */
            _XEatData(dpy, compsize);
            __glXSetError(gc, GL_OUT_OF_MEMORY);
        } else {
            GLint width;

            /*
            ** Fetch data into holding buffer.  Apply pixel store pack modes
            ** to put data back into client memory
            */
            width = (int)reply.width;
            __GLX_SINGLE_GET_CHAR_ARRAY(((char*)buf),(long)compsize);
            /* Histogram data is 1-dimensional. */
            __glEmptyImage(gc, 1, width, 1, 1, format, type, buf, values);
            Xfree((char*) buf);
        }
    } else {
        /*
        ** GL error occurred, don't modify user's buffer.
        */
    }
    __GLX_SINGLE_END();
}
/**
 * Indirect-rendering glGetConvolutionFilter.
 *
 * Sends a GetConvolutionFilter single request, reads the reply, fetches
 * the filter image into a temporary buffer, and unpacks it into the
 * caller's buffer honoring the client's pixel-store pack modes.
 */
void glGetConvolutionFilter(GLenum target, GLenum format, GLenum type,
                            GLvoid *image)
{
    __GLX_SINGLE_DECLARE_VARIABLES();
    xGLXGetConvolutionFilterReply reply;
    GLubyte *buf;

    if (!dpy) return;
    __GLX_SINGLE_LOAD_VARIABLES();

    /* Send request */
    __GLX_SINGLE_BEGIN(X_GLsop_GetConvolutionFilter, __GLX_PAD(13));
    __GLX_SINGLE_PUT_LONG(0,target);
    __GLX_SINGLE_PUT_LONG(4,format);
    __GLX_SINGLE_PUT_LONG(8,type);
    /* Tell the server whether replies should arrive byte-swapped. */
    __GLX_SINGLE_PUT_CHAR(12,gc->state.storePack.swapEndian);
    __GLX_SINGLE_READ_XREPLY();

    /* Reply length is in 4-byte units. */
    compsize = reply.length << 2;

    if (compsize != 0) {
        /* Allocate a holding buffer to transform the data from */
        buf = (GLubyte*) Xmalloc(compsize);
        if (!buf) {
            /* Throw data away */
            _XEatData(dpy, compsize);
            __glXSetError(gc, GL_OUT_OF_MEMORY);
        } else {
            GLint width, height;

            /*
            ** Fetch data into holding buffer.  Apply pixel store pack modes
            ** to put data back into client memory
            */
            width = reply.width;
            height = reply.height;
            __GLX_SINGLE_GET_CHAR_ARRAY(((char*)buf),compsize);
            __glEmptyImage(gc, 2, width, height, 1, format, type, buf,
                           image);
            Xfree((char*) buf);
        }
    } else {
        /*
        ** GL error occurred, don't modify user's buffer.
        */
    }
    __GLX_SINGLE_END();
}
/**
 * Indirect-rendering glGetPolygonStipple.
 *
 * Fetches the 32x32 stipple pattern (128 bytes of 1-bit pixels) from the
 * server and unpacks it into the caller's buffer.
 */
void glGetPolygonStipple(GLubyte *mask)
{
    __GLX_SINGLE_DECLARE_VARIABLES();
    xGLXSingleReply reply;
    /* 32x32 bits = 128 bytes, the fixed size of a stipple pattern. */
    GLubyte buf[128];

    if (!dpy) return;
    __GLX_SINGLE_LOAD_VARIABLES();

    __GLX_SINGLE_BEGIN(X_GLsop_GetPolygonStipple,__GLX_PAD(1));
    /* lsb_first flag: always GL_FALSE here. */
    __GLX_SINGLE_PUT_CHAR(0,GL_FALSE);
    __GLX_SINGLE_READ_XREPLY();

    /* Reply length is in 4-byte units: 32 words == 128 bytes expected. */
    if (reply.length == 32) {
        __GLX_SINGLE_GET_CHAR_ARRAY(buf,128);
        __glEmptyImage(gc, 2, 32, 32, 1, GL_COLOR_INDEX, GL_BITMAP, buf,
                       mask);
    }
    __GLX_SINGLE_END();
}
/**
 * EXT_texture_object entry point for glAreTexturesResident.
 *
 * Direct-rendering contexts forward to the driver's dispatch table;
 * indirect contexts send a vendor-private request carrying the texture
 * names and read the per-texture residence flags from the reply.
 */
GLboolean
glAreTexturesResidentEXT(GLsizei n, const GLuint * textures,
                         GLboolean * residences)
{
    struct glx_context *const gc = __glXGetCurrentContext();

    if (gc->isDirect) {
        const _glapi_proc *const table = (_glapi_proc *) GET_DISPATCH();
        /* 332 is the fixed glapi dispatch-table slot for
         * AreTexturesResidentEXT.
         */
        PFNGLARETEXTURESRESIDENTEXTPROC p =
            (PFNGLARETEXTURESRESIDENTEXTPROC) table[332];

        return p(n, textures, residences);
    }
    else {
        struct glx_context *const gc = __glXGetCurrentContext();
        Display *const dpy = gc->currentDpy;
        GLboolean retval = (GLboolean) 0;
        const GLuint cmdlen = 4 + __GLX_PAD((n * 4));
        if (__builtin_expect((n >= 0) && (dpy != NULL), 1)) {
            GLubyte const *pc =
                __glXSetupVendorRequest(gc, X_GLXVendorPrivateWithReply,
                                        X_GLvop_AreTexturesResidentEXT,
                                        cmdlen);
            (void) memcpy((void *) (pc + 0), (void *) (&n), 4);
            (void) memcpy((void *) (pc + 4), (void *) (textures), (n * 4));
            if (n & 3) {
                /* see comments in __indirect_glAreTexturesResident() */
                /* NOTE(review): malloc result is unchecked; on failure
                 * __glXReadReply would write through NULL.  Any fix must
                 * still consume the reply to keep the stream in sync.
                 */
                GLboolean *res4 = malloc((n + 3) & ~3);
                retval = (GLboolean) __glXReadReply(dpy, 1, res4, GL_TRUE);
                memcpy(residences, res4, n);
                free(res4);
            }
            else {
                retval =
                    (GLboolean) __glXReadReply(dpy, 1, residences, GL_TRUE);
            }
            UnlockDisplay(dpy);
            SyncHandle();
        }
        return retval;
    }
}
/**
 * Emit a single element using "old" DrawArrays protocol from
 * EXT_vertex_arrays / OpenGL 1.1.
 *
 * Unlike the non-DrawArrays path, no per-attribute command headers are
 * emitted — only the raw element data for each enabled array, each padded
 * to a 4-byte boundary.
 *
 * \param dst     Protocol destination buffer.
 * \param arrays  Vertex array state vector for the current context.
 * \param index   Index of the element to emit.
 * \returns Pointer to the first byte in \c dst after the emitted data.
 */
GLubyte *
emit_element_old( GLubyte * dst,
                  const struct array_state_vector * arrays,
                  unsigned index )
{
    unsigned i;

    for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
        if ( arrays->arrays[i].enabled ) {
            /* Byte offset of this element within the client's array. */
            const size_t offset = index * arrays->arrays[i].true_stride;

            (void) memcpy( dst, ((GLubyte *) arrays->arrays[i].data) + offset,
                           arrays->arrays[i].element_size );

            /* Protocol data is always padded to a 4-byte boundary. */
            dst += __GLX_PAD( arrays->arrays[i].element_size );
        }
    }

    return dst;
}
/**
 * Dispatch for TexGendv (same byte order as the server).
 *
 * \param pc  Pointer to the command body: target at offset 0, pname at
 *            offset 4, the double parameters at offset 8.
 */
void __glXDisp_TexGendv(GLbyte *pc)
{
#ifdef __GLX_ALIGN64
    GLenum pname;
    GLint cmdlen;
    GLint compsize;

    pname = *(GLenum *)(pc + 4);
    compsize = __glTexGendv_size(pname);
    /* Negative size means an unrecognized pname; clamp so the realignment
     * copy below stays bounded.
     */
    if (compsize < 0) compsize = 0;
    cmdlen = __GLX_PAD(8+compsize*8);

    /* The doubles at pc+8 must be 8-byte aligned.  If pc is only 4-byte
     * aligned, slide the whole command down 4 bytes (into the preceding
     * header space) so pc+8 lands on an 8-byte boundary.
     */
    if ((unsigned long)(pc) & 7) {
        __GLX_MEM_COPY(pc-4, pc, cmdlen);
        pc -= 4;
    }
#endif
    glTexGendv(
        *(GLenum *)(pc + 0),
        *(GLenum *)(pc + 4),
        (GLdouble *)(pc + 8)
    );
}
/**
 * Byte-swapped dispatch for TexGendv.
 *
 * Swaps the enums and the double array in place, realigns the command on
 * 64-bit-alignment-sensitive platforms, then executes it.
 *
 * Fix: the original swapped the command but never called glTexGendv, so
 * byte-swapped clients' TexGendv commands were silently dropped.  The
 * non-swapped twin (__glXDisp_TexGendv) ends with exactly this call.
 *
 * \param pc  Pointer to the command body: target at offset 0, pname at
 *            offset 4, the double parameters at offset 8 (client order).
 */
void __glXDispSwap_TexGendv(GLbyte *pc)
{
    GLenum pname;
    GLint cmdlen;
    GLint compsize;
    __GLX_DECLARE_SWAP_VARIABLES;
    __GLX_DECLARE_SWAP_ARRAY_VARIABLES;

    /* pname must be swapped first: it selects how many doubles follow. */
    __GLX_SWAP_INT(pc + 4);
    pname = *(GLenum *)(pc + 4);
    compsize = __glTexGendv_size(pname);
    /* Negative size means an unrecognized pname; clamp so the swaps and
     * the realignment copy below stay bounded.
     */
    if (compsize < 0) compsize = 0;
    cmdlen = __GLX_PAD(8+compsize*8);
#ifdef __GLX_ALIGN64
    /* The doubles at pc+8 must be 8-byte aligned; if not, slide the whole
     * command down 4 bytes (see __glXDisp_TexGendv).
     */
    if ((unsigned long)(pc) & 7) {
        __GLX_MEM_COPY(pc-4, pc, cmdlen);
        pc -= 4;
    }
#endif
    __GLX_SWAP_INT(pc + 0);
    __GLX_SWAP_DOUBLE_ARRAY(pc + 8, compsize);

    glTexGendv(
        *(GLenum *)(pc + 0),
        *(GLenum *)(pc + 4),
        (GLdouble *)(pc + 8)
    );
}