Example #1
/*
** Setup for sending a GLX command on dpy.  Make sure the extension is
** initialized.  Try to avoid calling __glXInitialize, as it's kind of slow.
*/
_X_HIDDEN CARD8
__glXSetupForCommand(Display * dpy)
{
    struct glx_context *gc;
    struct glx_display *priv;

   /* If this thread has a current context, flush its rendering commands */
   gc = __glXGetCurrentContext();
   if (gc->currentDpy) {
      /* Flush rendering buffer of the current context, if any */
      (void) __glXFlushRenderBuffer(gc, gc->pc);

      if (gc->currentDpy == dpy) {
         /* Use the opcode from gc because it's right */
         return gc->majorOpcode;
      }
      else {
         /*
          ** Have to get info about the argument dpy because it might be
          ** for a different server
          */
      }
   }

   /* Forced to look up the extension via the slow initialize route */
   priv = __glXInitialize(dpy);
   if (!priv) {
      return 0;
   }
   return priv->majorOpcode;
}
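/* Hypothetical caller sketch, not taken from the source above: a GLX entry
 * point that needs to issue a request on dpy first obtains the extension's
 * major opcode and bails out if GLX is unavailable.  The function name and
 * the commented request-building steps are illustrative assumptions; the
 * types come from the usual Xlib headers.
 */
static Bool
example_glx_query(Display * dpy)
{
   CARD8 opcode = __glXSetupForCommand(dpy);

   if (!opcode)
      return False;

   /* A real caller would now LockDisplay(dpy), build a request with
    * req->reqType = opcode, then UnlockDisplay(dpy) and SyncHandle().
    */
   return True;
}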
void
emit_DrawArrays_old( GLenum mode, GLint first, GLsizei count )
{
    __GLXcontext *gc = __glXGetCurrentContext();
    const __GLXattribute * state = 
       (const __GLXattribute *)(gc->client_state_private);
    struct array_state_vector * arrays = state->array_state;

    GLubyte * pc;
    size_t elements_per_request;
    unsigned total_requests = 0;
    unsigned i;
    size_t total_sent = 0;


    pc = emit_DrawArrays_header_old( gc, arrays, & elements_per_request,
				     & total_requests, mode, count);

    
    /* Write the arrays.
     */

    if ( total_requests == 0 ) {
	assert( elements_per_request >= count );

	for ( i = 0 ; i < count ; i++ ) {
	    pc = emit_element_old( pc, arrays, i + first );
	}

	assert( pc <= gc->bufEnd );

	gc->pc = pc;
	if ( gc->pc > gc->limit ) {
	    (void) __glXFlushRenderBuffer(gc, gc->pc);
	}
    }
    else {
	unsigned req;


	for ( req = 2 ; req <= total_requests ; req++ ) {
	    if ( count < elements_per_request ) {
		elements_per_request = count;
	    }

	    pc = gc->pc;
	    for ( i = 0 ; i < elements_per_request ; i++ ) {
		pc = emit_element_old( pc, arrays, i + first );
	    }

	    first += elements_per_request;

	    total_sent += (size_t) (pc - gc->pc);
	    __glXSendLargeChunk( gc, req, total_requests, gc->pc,
				 pc - gc->pc );

	    count -= elements_per_request;
	}
    }
}
/**
 * Emit a \c glDrawArrays command using the "none" protocol.  That is,
 * emit immediate-mode commands that are equivalent to the requested
 * \c glDrawArrays command.  This is used with servers that don't support
 * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
 * vertex state is enabled that is not compatible with that protocol.
 */
void
emit_DrawArrays_none( GLenum mode, GLint first, GLsizei count )
{
    __GLXcontext *gc = __glXGetCurrentContext();
    const __GLXattribute * state = 
       (const __GLXattribute *)(gc->client_state_private);
    struct array_state_vector * arrays = state->array_state;

    size_t single_vertex_size;
    GLubyte * pc;
    unsigned  i;
    static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
    static const uint16_t end_cmd[2]   = { 4, X_GLrop_End };


    single_vertex_size = calculate_single_vertex_size_none( arrays );

    pc = gc->pc;

    (void) memcpy( pc, begin_cmd, 4 );
    *(int *)(pc +  4) = mode;

    pc += 8;

    for ( i = 0 ; i < count ; i++ ) {
	if ( (pc + single_vertex_size) >= gc->bufEnd ) {
	    pc = __glXFlushRenderBuffer(gc, gc->pc);
	}

	pc = emit_element_none( pc, arrays, first + i );
    }

    if ( (pc + 4) >= gc->bufEnd ) {
	pc = __glXFlushRenderBuffer(gc, gc->pc);
    }

    (void) memcpy( pc, end_cmd, 4 );
    pc += 4;

    gc->pc = pc;
    if ( gc->pc > gc->limit ) {
	(void) __glXFlushRenderBuffer(gc, gc->pc);
    }
}
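/* Illustrative equivalence, not from the source: the "none" protocol code
 * above produces the same wire traffic as this immediate-mode loop, with a
 * Begin/End render-opcode pair wrapping one emitted vertex per enabled array
 * element.  Assumes the usual GL headers; the helper name is hypothetical.
 */
static void
example_draw_arrays_equivalent(GLenum mode, GLint first, GLsizei count)
{
   GLsizei i;

   glBegin(mode);
   for (i = 0; i < count; i++)
      glArrayElement(first + i);
   glEnd();
}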
void __indirect_glArrayElement(GLint index)
{
    __GLXcontext *gc = __glXGetCurrentContext();
    const __GLXattribute * state = 
       (const __GLXattribute *)(gc->client_state_private);
    struct array_state_vector * arrays = state->array_state;

    size_t single_vertex_size;


    single_vertex_size = calculate_single_vertex_size_none( arrays );

    if ( (gc->pc + single_vertex_size) >= gc->bufEnd ) {
	gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
    }

    gc->pc = emit_element_none( gc->pc, arrays, index );

    if ( gc->pc > gc->limit ) {
	(void) __glXFlushRenderBuffer(gc, gc->pc);
    }
}
Example #5
GLubyte *
__glXSetupSingleRequest(struct glx_context * gc, GLint sop, GLint cmdlen)
{
   xGLXSingleReq *req;
   Display *const dpy = gc->currentDpy;

   (void) __glXFlushRenderBuffer(gc, gc->pc);
   LockDisplay(dpy);
   GetReqExtra(GLXSingle, cmdlen, req);
   req->reqType = gc->majorOpcode;
   req->contextTag = gc->currentContextTag;
   req->glxCode = sop;
   return (GLubyte *) (req) + sz_xGLXSingleReq;
}
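/* Hedged usage sketch: callers of __glXSetupSingleRequest write cmdlen bytes
 * of payload at the returned pointer, read the reply, then unlock the display
 * and sync, mirroring the non-XCB branch in the next example.  The opcode and
 * 4-byte payload below are illustrative assumptions, not a real entry point.
 */
static GLboolean
example_single_request(struct glx_context * gc, GLint value)
{
   Display *const dpy = gc->currentDpy;
   GLubyte *pc = __glXSetupSingleRequest(gc, X_GLsop_IsList, 4);
   GLboolean retval;

   (void) memcpy((void *) (pc + 0), (void *) (&value), 4);
   retval = (GLboolean) __glXReadReply(dpy, 0, NULL, GL_FALSE);
   UnlockDisplay(dpy);
   SyncHandle();
   return retval;
}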
Example #6
GLboolean
__indirect_glAreTexturesResident(GLsizei n, const GLuint * textures,
                                 GLboolean * residences)
{
   struct glx_context *const gc = __glXGetCurrentContext();
   Display *const dpy = gc->currentDpy;
   GLboolean retval = (GLboolean) 0;
   if (__builtin_expect((n >= 0) && (dpy != NULL), 1)) {
#ifdef USE_XCB
      xcb_connection_t *c = XGetXCBConnection(dpy);
      (void) __glXFlushRenderBuffer(gc, gc->pc);
      xcb_glx_are_textures_resident_reply_t *reply =
         xcb_glx_are_textures_resident_reply(c,
                                             xcb_glx_are_textures_resident
                                             (c, gc->currentContextTag, n,
                                              textures), NULL);
      (void) memcpy(residences, xcb_glx_are_textures_resident_data(reply),
                    xcb_glx_are_textures_resident_data_length(reply) *
                    sizeof(GLboolean));
      retval = reply->ret_val;
      free(reply);
#else
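      /* __GLX_PAD rounds the payload size up to a multiple of four bytes,
       * matching the 4-byte alignment required by the GLX protocol.
       */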
      const GLuint cmdlen = 4 + __GLX_PAD((n * 4));
      GLubyte const *pc =
         __glXSetupSingleRequest(gc, X_GLsop_AreTexturesResident, cmdlen);
      (void) memcpy((void *) (pc + 0), (void *) (&n), 4);
      (void) memcpy((void *) (pc + 4), (void *) (textures), (n * 4));
      if (n & 3) {
         /* n is not a multiple of four.
          * When reply_is_always_array is TRUE, __glXReadReply() will
          * put a multiple of four bytes into the dest buffer.  If the
          * caller's buffer is not a multiple of four in size, we'll write
          * out of bounds.  So use a temporary buffer that's a few bytes
          * larger.
          */
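         /* (n + 3) & ~3 rounds n up to the next multiple of four. */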
         GLboolean *res4 = malloc((n + 3) & ~3);
         retval = (GLboolean) __glXReadReply(dpy, 1, res4, GL_TRUE);
         memcpy(residences, res4, n);
         free(res4);
      }
      else {
         retval = (GLboolean) __glXReadReply(dpy, 1, residences, GL_TRUE);
      }
      UnlockDisplay(dpy);
      SyncHandle();
#endif /* USE_XCB */
   }
   return retval;
}
Example #7
GLboolean
__indirect_glAreTexturesResident(GLsizei n, const GLuint * textures,
                                 GLboolean * residences)
{
   struct glx_context *const gc = __glXGetCurrentContext();
   Display *const dpy = gc->currentDpy;
   GLboolean retval = (GLboolean) 0;
   if (__builtin_expect((n >= 0) && (dpy != NULL), 1)) {
      xcb_connection_t *c = XGetXCBConnection(dpy);
      xcb_glx_are_textures_resident_reply_t *reply;
      (void) __glXFlushRenderBuffer(gc, gc->pc);
      reply =
         xcb_glx_are_textures_resident_reply(c,
                                             xcb_glx_are_textures_resident
                                             (c, gc->currentContextTag, n,
                                              textures), NULL);
      (void) memcpy(residences, xcb_glx_are_textures_resident_data(reply),
                    xcb_glx_are_textures_resident_data_length(reply) *
                    sizeof(GLboolean));
      retval = reply->ret_val;
      free(reply);
   }
   return retval;
}
Example #8
const GLubyte *
__indirect_glGetString(GLenum name)
{
   struct glx_context *gc = __glXGetCurrentContext();
   Display *dpy = gc->currentDpy;
   GLubyte *s = NULL;

   if (!dpy)
      return 0;

   /*
    ** Return the cached copy if the string has already been fetched
    */
   switch (name) {
   case GL_VENDOR:
      if (gc->vendor)
         return gc->vendor;
      break;
   case GL_RENDERER:
      if (gc->renderer)
         return gc->renderer;
      break;
   case GL_VERSION:
      if (gc->version)
         return gc->version;
      break;
   case GL_EXTENSIONS:
      if (gc->extensions)
         return gc->extensions;
      break;
   default:
      __glXSetError(gc, GL_INVALID_ENUM);
      return 0;
   }

   /*
    ** Get requested string from server
    */

   (void) __glXFlushRenderBuffer(gc, gc->pc);
   s = (GLubyte *) __glXGetString(dpy, gc->majorOpcode, gc->currentContextTag,
                                  name);
   if (!s) {
      /* Throw data on the floor */
      __glXSetError(gc, GL_OUT_OF_MEMORY);
   }
   else {
      /*
       ** Update local cache
       */
      switch (name) {
      case GL_VENDOR:
         gc->vendor = s;
         break;

      case GL_RENDERER:
         gc->renderer = s;
         break;

      case GL_VERSION:{
            int client_major;
            int client_minor;

            version_from_string((char *) s,
                                &gc->server_major, &gc->server_minor);
            __glXGetGLVersion(&client_major, &client_minor);

            if ((gc->server_major < client_major)
                || ((gc->server_major == client_major)
                    && (gc->server_minor <= client_minor))) {
               gc->version = s;
            }
            else {
               /* Allow 7 bytes for the client-side GL version.  This allows
                * for up to version 999.999.  I'm not holding my breath for
                * that one!  The extra 4 is for the ' ()\0' that will be
                * added.
                */
               const size_t size = 7 + strlen((char *) s) + 4;

               gc->version = Xmalloc(size);
               if (gc->version == NULL) {
                  /* If we couldn't allocate memory for the new string,
                   * make a best effort and just copy the client-side version
                   * to the string and use that.  It probably doesn't
                   * matter what is done here.  If there is no memory available
                   * for a short string, the system is probably going to die
                   * soon anyway.
                   */
                  snprintf((char *) s, strlen((char *) s) + 1, "%u.%u",
                           client_major, client_minor);
                  gc->version = s;
               }
               else {
                  snprintf((char *) gc->version, size, "%u.%u (%s)",
                           client_major, client_minor, s);
                  Xfree(s);
                  s = gc->version;
               }
            }
            break;
         }

      case GL_EXTENSIONS:{
            int major = 1;
            int minor = 0;

            /* This code is currently disabled.  I was reminded that some
             * vendors intentionally exclude some extensions from their
             * extension string that are part of the core version they
             * advertise.  In particular, on Nvidia drivers this means that
             * the functionality is supported by the driver, but is not
             * hardware accelerated.  For example, a TNT will show core
             * version 1.5, but most of the post-1.2 functionality is a
             * software fallback.
             *
             * I don't want to break applications that rely on this odd
             * behavior.  At the same time, the code is written and tested,
             * so I didn't want to throw it away.  Therefore, the code is here
             * but disabled.  In the future, we may wish to add an environment
             * variable to enable it.
             */

#if 0
            /* Call glGetString just to make sure that gc->server_major and
             * gc->server_minor are set.  This version may be higher than we
             * can completely support, but it may imply support for some
             * extensions that we can support.
             *
             * For example, at the time of this writing, the client-side
             * library only supports up to core GL version 1.2.  However, cube
             * textures, multitexture, multisampling, and some other 1.3
             * features are supported.  If the server reports back version
             * 1.3, but does not report all of those extensions, we will
             * enable them.
             */
            (void) glGetString(GL_VERSION);
            major = gc->server_major, minor = gc->server_minor;
#endif

            __glXCalculateUsableGLExtensions(gc, (char *) s, major, minor);
            XFree(s);
            s = gc->extensions;
            break;
         }
      }
   }
   return s;
}
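/* Usage sketch, assuming a current indirect-rendering context (the helper
 * below is hypothetical, not from the source): the first query for a given
 * name round-trips to the server, while repeated queries return the pointer
 * cached on the context by the function above.
 */
static void
example_cached_strings(void)
{
   const GLubyte *first = glGetString(GL_RENDERER);   /* fetched from server */
   const GLubyte *again = glGetString(GL_RENDERER);   /* returned from cache */

   (void) first;
   (void) again;
}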
void
emit_DrawElements_old( GLenum mode, GLsizei count, GLenum type,
		       const GLvoid *indices )
{
    __GLXcontext *gc = __glXGetCurrentContext();
    const __GLXattribute * state = 
       (const __GLXattribute *)(gc->client_state_private);
    struct array_state_vector * arrays = state->array_state;

    GLubyte * pc;
    size_t elements_per_request;
    unsigned total_requests = 0;
    unsigned i;
    unsigned req;


    pc = emit_DrawArrays_header_old( gc, arrays, & elements_per_request,
				     & total_requests, mode, count);

    
    /* Write the arrays.
     */

    req = 2;
    while ( count > 0 ) {
	if ( count < elements_per_request ) {
	    elements_per_request = count;
	}

	switch( type ) {
	case GL_UNSIGNED_INT: {
	    const GLuint   * ui_ptr = (const GLuint   *) indices;

	    for ( i = 0 ; i < elements_per_request ; i++ ) {
		const GLint index = (GLint) *(ui_ptr++);
		pc = emit_element_old( pc, arrays, index );
	    }
	    break;
	}
	case GL_UNSIGNED_SHORT: {
	    const GLushort * us_ptr = (const GLushort *) indices;

	    for ( i = 0 ; i < elements_per_request ; i++ ) {
		const GLint index = (GLint) *(us_ptr++);
		pc = emit_element_old( pc, arrays, index );
	    }
	    break;
	}
	case GL_UNSIGNED_BYTE: {
	    const GLubyte  * ub_ptr = (const GLubyte  *) indices;

	    for ( i = 0 ; i < elements_per_request ; i++ ) {
		const GLint index = (GLint) *(ub_ptr++);
		pc = emit_element_old( pc, arrays, index );
	    }
	    break;
	}
	}

	if ( total_requests != 0 ) {
	    __glXSendLargeChunk( gc, req, total_requests, gc->pc,
				 pc - gc->pc );
	    pc = gc->pc;
	    req++;
	}

	count -= elements_per_request;
    }


    assert( (total_requests == 0) || ((req - 1) == total_requests) );

    if ( total_requests == 0 ) {
	assert( pc <= gc->bufEnd );

	gc->pc = pc;
	if ( gc->pc > gc->limit ) {
	    (void) __glXFlushRenderBuffer(gc, gc->pc);
	}
    }
}
void
emit_DrawElements_none( GLenum mode, GLsizei count, GLenum type,
			const GLvoid *indices )
{
    __GLXcontext *gc = __glXGetCurrentContext();
    const __GLXattribute * state = 
       (const __GLXattribute *)(gc->client_state_private);
    struct array_state_vector * arrays = state->array_state;
    static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
    static const uint16_t end_cmd[2]   = { 4, X_GLrop_End };

    GLubyte * pc;
    size_t single_vertex_size;
    unsigned  i;


    single_vertex_size = calculate_single_vertex_size_none( arrays );


    if ( (gc->pc + single_vertex_size) >= gc->bufEnd ) {
	gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
    }

    pc = gc->pc;

    (void) memcpy( pc, begin_cmd, 4 );
    *(int *)(pc +  4) = mode;

    pc += 8;

    for ( i = 0 ; i < count ; i++ ) {
	unsigned  index = 0;

	if ( (pc + single_vertex_size) >= gc->bufEnd ) {
	    pc = __glXFlushRenderBuffer(gc, gc->pc);
	}

	switch( type ) {
	case GL_UNSIGNED_INT:
	    index = (unsigned) (((GLuint *) indices)[i]);
	    break;
	case GL_UNSIGNED_SHORT:
	    index = (unsigned) (((GLushort *) indices)[i]);
	    break;
	case GL_UNSIGNED_BYTE:
	    index = (unsigned) (((GLubyte *) indices)[i]);
	    break;
	}
	pc = emit_element_none( pc, arrays, index );
    }

    if ( (pc + 4) >= gc->bufEnd ) {
	pc = __glXFlushRenderBuffer(gc, gc->pc);
    }

    (void) memcpy( pc, end_cmd, 4 );
    pc += 4;

    gc->pc = pc;
    if ( gc->pc > gc->limit ) {
	(void) __glXFlushRenderBuffer(gc, gc->pc);
    }
}
/**
 * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays
 * protocol.
 * 
 * \param gc                    GLX context.
 * \param arrays                Array state.
 * \param elements_per_request  Location to store the number of elements that
 *                              can fit in a single Render / RenderLarge
 *                              command.
 * \param total_requests        Total number of requests for a RenderLarge
 *                              command.  If a Render command is used, this
 *                              will be zero.
 * \param mode                  Drawing mode.
 * \param count                 Number of vertices.
 * 
 * \returns
 * A pointer to the buffer for array data.
 */
static GLubyte *
emit_DrawArrays_header_old( __GLXcontext * gc,
			    struct array_state_vector * arrays,
			    size_t * elements_per_request,
			    size_t * total_requests,
			    GLenum mode, GLsizei count )
{
    size_t command_size;
    size_t single_vertex_size;
    const unsigned header_size = 16;
    unsigned  i;
    GLubyte * pc;


    /* Determine the size of the whole command.  This includes the header,
     * the ARRAY_INFO data and the array data.  Once this size is calculated,
     * it will be known whether a Render or RenderLarge command is needed.
     */

    single_vertex_size = 0;
    for ( i = 0 ; i < arrays->num_arrays ; i++ ) {
	if ( arrays->arrays[i].enabled ) {
	    single_vertex_size += __GLX_PAD( arrays->arrays[i].element_size );
	}
    }

    command_size = arrays->array_info_cache_size + header_size 
      + (single_vertex_size * count);


    /* Write the header for either a Render command or a RenderLarge
     * command.  After the header is written, write the ARRAY_INFO data.
     */

    if ( command_size > gc->maxSmallRenderCommandSize ) {
	/* maxSize is the maximum amount of data that can be stuffed into a single
	 * packet.  sz_xGLXRenderReq is added because bufSize is the maximum
	 * packet size minus sz_xGLXRenderReq.
	 */
	const size_t maxSize = (gc->bufSize + sz_xGLXRenderReq)
	  - sz_xGLXRenderLargeReq;
	unsigned vertex_requests;


	/* Calculate the number of data packets that will be required to send
	 * the whole command.  To do this, the number of vertices that
	 * will fit in a single buffer must be calculated.
	 * 
	 * The important value here is elements_per_request.  This is the
	 * number of complete array elements that will fit in a single
	 * buffer.  There may be some wasted space at the end of the buffer,
	 * but splitting elements across buffer boundaries would be painful.
	 */
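	/* Worked example with illustrative numbers: if maxSize is 65532 bytes
	 * and single_vertex_size is 32 bytes, elements_per_request is 2047
	 * vertices per chunk, so count = 10000 needs ceil(10000 / 2047) = 5
	 * vertex chunks plus the initial info/header chunk, 6 requests total.
	 */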

	elements_per_request[0] = maxSize / single_vertex_size;

	vertex_requests = (count + elements_per_request[0] - 1)
	  / elements_per_request[0];
	  
	*total_requests = vertex_requests + 1;


	__glXFlushRenderBuffer(gc, gc->pc);

	command_size += 4;

	pc = ((GLubyte *) arrays->array_info_cache) - (header_size + 4);
	*(uint32_t *)(pc +  0) = command_size;
	*(uint32_t *)(pc +  4) = X_GLrop_DrawArrays;
	*(uint32_t *)(pc +  8) = count;
	*(uint32_t *)(pc + 12) = arrays->enabled_client_array_count;
	*(uint32_t *)(pc + 16) = mode;

	__glXSendLargeChunk( gc, 1, *total_requests, pc,
			     header_size + 4 + arrays->array_info_cache_size );

	pc = gc->pc;
    }
    else {
	if ( (gc->pc + command_size) >= gc->bufEnd ) {
	    (void) __glXFlushRenderBuffer(gc, gc->pc);
	}

	pc = gc->pc;
	*(uint16_t *)(pc +  0) = command_size;
	*(uint16_t *)(pc +  2) = X_GLrop_DrawArrays;
	*(uint32_t *)(pc +  4) = count;
	*(uint32_t *)(pc +  8) = arrays->enabled_client_array_count;
	*(uint32_t *)(pc + 12) = mode;

	pc += header_size;

	(void) memcpy( pc, arrays->array_info_cache,
		       arrays->array_info_cache_size );
	pc += arrays->array_info_cache_size;

	*elements_per_request = count;
	*total_requests = 0;
    }


    return pc;
}
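/* Illustrative view of the 16-byte header written by the small-command
 * (Render) path above.  The struct and field names are descriptive
 * assumptions, not types from the real GLX headers; the RenderLarge path
 * uses 32-bit length and opcode fields instead, which is why 4 extra bytes
 * are added to command_size there.
 */
struct example_drawarrays_render_header {
   uint16_t length;        /* total command size, including this header */
   uint16_t opcode;        /* X_GLrop_DrawArrays */
   uint32_t vertex_count;  /* count */
   uint32_t array_count;   /* arrays->enabled_client_array_count */
   uint32_t primitive;     /* mode */
};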