Example #1
/**
 * We should call this periodically from a function such as glXMakeCurrent
 * in order to test if multiple threads are being used.
 */
void
_glapi_check_multithread(void)
{
#if defined(THREADS) && !defined(GLX_USE_TLS)
   static unsigned long knownID;
   static GLboolean firstCall = GL_TRUE;

   if (ThreadSafe)
      return;

   CHECK_MULTITHREAD_LOCK();
   if (firstCall) {
      /* initialize TSDs */
      (void) _glthread_GetTSD(&ContextTSD);
      (void) _glthread_GetTSD(&_gl_DispatchTSD);

      knownID = _glthread_GetID();
      firstCall = GL_FALSE;
   }
   else if (knownID != _glthread_GetID()) {
      ThreadSafe = GL_TRUE;
      _glapi_set_dispatch(NULL);
      _glapi_set_context(NULL);
   }
   CHECK_MULTITHREAD_UNLOCK();
#endif
}
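The comment above names the intended call site: a make-current style entry point runs the check on every bind so the library notices as soon as a second thread starts issuing GL calls. A minimal sketch of such a caller, assuming <GL/glx.h> and the glapi headers are available; the function name here is hypothetical, and Example #5 below shows the real glXMakeCurrent path.

/* Hypothetical caller; any per-bind entry point can host the check. */
static Bool
make_current_sketch(Display *dpy, GLXDrawable draw, GLXContext gc)
{
   /* Notice late-appearing threads before touching per-thread state. */
   _glapi_check_multithread();

   /* ... bind the context and update the dispatch table here ... */
   (void) dpy; (void) draw; (void) gc;
   return True;
}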
Example #2
/**
 * We should call this periodically from a function such as glXMakeCurrent
 * in order to test if multiple threads are being used.
 */
void
_glapi_check_multithread(void)
{
#if defined(THREADS) && !defined(GLX_USE_TLS)
   if (!ThreadSafe) {
      static unsigned long knownID;
      static GLboolean firstCall = GL_TRUE;
      if (firstCall) {
         knownID = _glthread_GetID();
         firstCall = GL_FALSE;
      }
      else if (knownID != _glthread_GetID()) {
         ThreadSafe = GL_TRUE;
         _glapi_set_dispatch(NULL);
         _glapi_set_context(NULL);
      }
   }
   else if (!_glapi_get_dispatch()) {
      /* make sure that this thread's dispatch pointer isn't null */
      _glapi_set_dispatch(NULL);
   }
#endif
}
Example #3
/**
 * Called by Mesa when rendering to a texture is done.
 */
static void
intel_finish_render_texture(GLcontext * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);

   DBG("End render texture (tid %x) tex %u\n", _glthread_GetID(), att->Texture->Name);

   if (irb) {
      /* just release the region */
      intel_region_release(&irb->region);
   }
   else if (att->Renderbuffer) {
      /* software fallback */
      _mesa_finish_render_texture(ctx, att);
      /* XXX FBO: Need to unmap the buffer (or in intelSpanRenderStart???) */
   }
}
Example #4
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc.
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(GLcontext * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct gl_texture_image *newImage
      = att->Texture->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image;
   GLuint dst_x, dst_y;

   (void) fb;

   ASSERT(newImage);

   intel_image = intel_texture_image(newImage);
   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _mesa_render_texture(ctx, fb, att);
      return;
   }
   else if (!irb) {
      irb = intel_wrap_texture(ctx, newImage);
      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _mesa_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_update_wrapper(ctx, irb, newImage)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _mesa_render_texture(ctx, fb, att);
       return;
   }

   DBG("Begin render texture tid %x tex=%u w=%d h=%d refcount=%d\n",
       _glthread_GetID(),
       att->Texture->Name, newImage->Width, newImage->Height,
       irb->Base.RefCount);

   /* point the renderbuffer's region to the texture image region */
   if (irb->region != intel_image->mt->region) {
      if (irb->region)
	 intel_region_release(&irb->region);
      intel_region_reference(&irb->region, intel_image->mt->region);
   }

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(intel_image->mt,
				  att->TextureLevel,
				  att->CubeMapFace,
				  att->Zoffset,
				  &dst_x, &dst_y);

   intel_image->mt->region->draw_offset = (dst_y * intel_image->mt->pitch +
					   dst_x) * intel_image->mt->cpp;
   intel_image->mt->region->draw_x = dst_x;
   intel_image->mt->region->draw_y = dst_y;
   intel_image->used_as_render_target = GL_TRUE;

   /* update drawing region, etc */
   intel_draw_buffer(ctx, fb);
}
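The draw_offset assignment above is a plain row-major address computation: the byte offset of pixel (dst_x, dst_y) from the start of the region is (dst_y * pitch + dst_x) * cpp, with pitch in pixels and cpp in bytes per pixel. A small worked example, assuming purely illustrative values and GLuint from <GL/gl.h>:

   /* Illustrative values only: pitch = 1024 pixels, cpp = 4 bytes per pixel,
    * image placed at (dst_x, dst_y) = (256, 512) within the miptree region.
    */
   GLuint pitch = 1024, cpp = 4, dst_x = 256, dst_y = 512;
   GLuint draw_offset = (dst_y * pitch + dst_x) * cpp;   /* 2098176 bytes */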
Example #5
/**
 * Make a particular context current.
 *
 * \note This is in this file so that it can access dummyContext.
 */
static Bool
MakeContextCurrent(Display * dpy, GLXDrawable draw,
                   GLXDrawable read, GLXContext gc)
{
   const GLXContext oldGC = __glXGetCurrentContext();
#ifdef GLX_USE_APPLEGL
   bool error = apple_glx_make_current_context(dpy, 
                   (oldGC && oldGC != &dummyContext) ? oldGC->apple : NULL, 
                   gc ? gc->apple : NULL, draw);
   
   apple_glx_diagnostic("%s: error %s\n", __func__, error ? "YES" : "NO");
   if(error)
      return GL_FALSE;
#else
   xGLXMakeCurrentReply reply;
   const CARD8 opcode = __glXSetupForCommand(dpy);
   const CARD8 oldOpcode = ((gc == oldGC) || (oldGC == &dummyContext))
      ? opcode : __glXSetupForCommand(oldGC->currentDpy);
   Bool bindReturnValue;
   __GLXattribute *state;

   if (!opcode || !oldOpcode) {
      return GL_FALSE;
   }

   /* Make sure that the new context has a nonzero ID.  In the request,
    * a zero context ID is used only to mean that we bind to no current
    * context.
    */
   if ((gc != NULL) && (gc->xid == None)) {
      return GL_FALSE;
   }

   if (gc == NULL && (draw != None || read != None)) {
      __glXGenerateError(dpy, gc, (draw != None) ? draw : read,
                         BadMatch, X_GLXMakeContextCurrent);
      return False;
   }
   if (gc != NULL && (draw == None || read == None)) {
      __glXGenerateError(dpy, gc, None, BadMatch, X_GLXMakeContextCurrent);
      return False;
   }

   _glapi_check_multithread();

   if (gc != NULL && gc->thread_id != 0 && gc->thread_id != _glthread_GetID()) {
      __glXGenerateError(dpy, gc, gc->xid,
                         BadAccess, X_GLXMakeContextCurrent);
      return False;
   }

#ifdef GLX_DIRECT_RENDERING
   /* Bind the direct rendering context to the drawable */
   if (gc && gc->driContext) {
      __GLXDRIdrawable *pdraw = FetchDRIDrawable(dpy, draw, gc);
      __GLXDRIdrawable *pread = FetchDRIDrawable(dpy, read, gc);

      if ((pdraw == NULL) || (pread == NULL)) {
         __glXGenerateError(dpy, gc, (pdraw == NULL) ? draw : read,
                            GLXBadDrawable, X_GLXMakeContextCurrent);
         return False;
      }

      bindReturnValue =
         (gc->driContext->bindContext) (gc->driContext, pdraw, pread);
   }
   else if (!gc && oldGC && oldGC->driContext) {
      bindReturnValue = True;
   }
   else
#endif
   {
      /* Send a glXMakeCurrent request to bind the new context. */
      bindReturnValue =
         SendMakeCurrentRequest(dpy, opcode, gc ? gc->xid : None,
                                ((dpy != oldGC->currentDpy)
                                 || oldGC->isDirect)
                                ? None : oldGC->currentContextTag, draw, read,
                                &reply);
   }


   if (!bindReturnValue) {
      return False;
   }

#ifdef GLX_DIRECT_RENDERING
   if ((dpy != oldGC->currentDpy || (gc && gc->driContext)) &&
       !oldGC->isDirect && oldGC != &dummyContext) {
#else
   if ((dpy != oldGC->currentDpy) && oldGC != &dummyContext) {
#endif
      xGLXMakeCurrentReply dummy_reply;

      /* We are either switching from one dpy to another and have to
       * send a request to the previous dpy to unbind the previous
       * context, or we are switching away from an indirect context to
       * a direct context and have to send a request to the dpy to
       * unbind the previous context.
       */
      (void) SendMakeCurrentRequest(oldGC->currentDpy, oldOpcode, None,
                                    oldGC->currentContextTag, None, None,
                                    &dummy_reply);
   }
#ifdef GLX_DIRECT_RENDERING
   else if (oldGC->driContext && oldGC != gc) {
      oldGC->driContext->unbindContext(oldGC->driContext);
   }
#endif

#endif /* GLX_USE_APPLEGL */

   /* Update our notion of what is current */
   __glXLock();
   if (gc == oldGC) {
      /* Even though the contexts are the same the drawable might have
       * changed.  Note that gc cannot be the dummy, and that oldGC
       * cannot be NULL, therefore if they are the same, gc is not
       * NULL and not the dummy.
       */
      if(gc) {
        gc->currentDrawable = draw;
        gc->currentReadable = read;
      }
   }
   else {
      if (oldGC != &dummyContext) {
         /* Old current context is no longer current to anybody */
         oldGC->currentDpy = 0;
         oldGC->currentDrawable = None;
         oldGC->currentReadable = None;
         oldGC->currentContextTag = 0;
         oldGC->thread_id = 0;
#ifdef GLX_USE_APPLEGL
         
         /*
          * At this point we should check if the context has been
          * through glXDestroyContext, and destroy it now if so.
          */
         if(oldGC->do_destroy) {
            __glXUnlock();
            /* glXDestroyContext uses the same global lock. */
            glXDestroyContext(dpy, oldGC);
            __glXLock();
#else
         if (oldGC->xid == None) {
            /* We are switching away from a context that was
             * previously destroyed, so we need to free the memory
             * for the old handle.
             */
#ifdef GLX_DIRECT_RENDERING
            /* Destroy the old direct rendering context */
            if (oldGC->driContext) {
               oldGC->driContext->destroyContext(oldGC->driContext,
                                                 oldGC->psc,
                                                 oldGC->createDpy);
               oldGC->driContext = NULL;
            }
#endif
            __glXFreeContext(oldGC);
#endif /* GLX_USE_APPLEGL */
         }
      }
      if (gc) {
         __glXSetCurrentContext(gc);

         gc->currentDpy = dpy;
         gc->currentDrawable = draw;
         gc->currentReadable = read;
#ifndef GLX_USE_APPLEGL
         gc->thread_id = _glthread_GetID();

#ifdef GLX_DIRECT_RENDERING
         if (!gc->driContext) {
#endif
            if (!IndirectAPI)
               IndirectAPI = __glXNewIndirectAPI();
            _glapi_set_dispatch(IndirectAPI);

#ifdef GLX_USE_APPLEGL
            do {
               extern void XAppleDRIUseIndirectDispatch(void);
               XAppleDRIUseIndirectDispatch();
            } while (0);
#endif

            state = (__GLXattribute *) (gc->client_state_private);

            gc->currentContextTag = reply.contextTag;
            if (state->array_state == NULL) {
               (void) glGetString(GL_EXTENSIONS);
               (void) glGetString(GL_VERSION);
               __glXInitVertexArrayState(gc);
            }
#ifdef GLX_DIRECT_RENDERING
         }
         else {
            gc->currentContextTag = -1;
         }
#endif
#endif /* GLX_USE_APPLEGL */
      }
      else {
         __glXSetCurrentContextNull();
      }
   }
   __glXUnlock();
   return GL_TRUE;
}


PUBLIC Bool
glXMakeCurrent(Display * dpy, GLXDrawable draw, GLXContext gc)
{
   return MakeContextCurrent(dpy, draw, draw, gc);
}

PUBLIC
GLX_ALIAS(Bool, glXMakeCurrentReadSGI,
          (Display * dpy, GLXDrawable d, GLXDrawable r, GLXContext ctx),
          (dpy, d, r, ctx), MakeContextCurrent)

PUBLIC
GLX_ALIAS(Bool, glXMakeContextCurrent,
          (Display * dpy, GLXDrawable d, GLXDrawable r,
           GLXContext ctx), (dpy, d, r, ctx), MakeContextCurrent)
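From the application's point of view these are the standard GLX binding entry points. A minimal usage sketch, assuming <GL/glx.h> and <stdio.h> are included and that dpy, window, draw_win, read_win, and ctx are placeholder handles created elsewhere:

   /* Bind ctx with the same drawable for drawing and reading. */
   if (!glXMakeCurrent(dpy, window, ctx))
      fprintf(stderr, "glXMakeCurrent failed\n");

   /* Or bind separate draw and read drawables (GLX 1.3 / make-current-read). */
   if (!glXMakeContextCurrent(dpy, draw_win, read_win, ctx))
      fprintf(stderr, "glXMakeContextCurrent failed\n");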
Example #6
static void
radeon_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct gl_texture_image *newImage
      = att->Texture->Image[att->CubeMapFace][att->TextureLevel];
   struct radeon_renderbuffer *rrb = radeon_renderbuffer(att->Renderbuffer);
   radeon_texture_image *radeon_image;
   GLuint imageOffset;

   radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, fb %p, rrb %p, att %p)\n",
		__func__, ctx, fb, rrb, att);

   (void) fb;

   ASSERT(newImage);

   radeon_image = (radeon_texture_image *)newImage;

   if (!radeon_image->mt || newImage->Border != 0) {
      /* Fallback on drawing to a texture without a miptree.
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _mesa_render_texture(ctx, fb, att);
      return;
   }
   else if (!rrb) {
      rrb = radeon_wrap_texture(ctx, newImage);
      if (rrb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &rrb->base);
      }
      else {
         /* fallback to software rendering */
         _mesa_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!radeon_update_wrapper(ctx, rrb, newImage)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _mesa_render_texture(ctx, fb, att);
       return;
   }

   DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
       _glthread_GetID(),
       att->Texture->Name, newImage->Width, newImage->Height,
       rrb->base.RefCount);

   /* point the renderbuffer's region to the texture image region */
   if (rrb->bo != radeon_image->mt->bo) {
      if (rrb->bo)
  	radeon_bo_unref(rrb->bo);
      rrb->bo = radeon_image->mt->bo;
      radeon_bo_ref(rrb->bo);
   }

   /* compute offset of the particular 2D image within the texture region */
   imageOffset = radeon_miptree_image_offset(radeon_image->mt,
                                            att->CubeMapFace,
                                            att->TextureLevel);

   if (att->Texture->Target == GL_TEXTURE_3D) {
      imageOffset += radeon_image->mt->levels[att->TextureLevel].rowstride *
                     radeon_image->mt->levels[att->TextureLevel].height *
                     att->Zoffset;
   }

   /* store that offset in the region, along with the correct pitch for
    * the image we are rendering to */
   rrb->draw_offset = imageOffset;
   rrb->pitch = radeon_image->mt->levels[att->TextureLevel].rowstride;

   /* update drawing region, etc */
   radeon_draw_buffer(ctx, fb);
}
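For 3D textures the code above advances the offset by whole slices: each slice at the chosen mip level occupies rowstride * height bytes, so slice Zoffset begins rowstride * height * Zoffset bytes past the 2D image offset. A small worked example, assuming purely illustrative values:

   /* Illustrative values only: rowstride = 2048 bytes per row,
    * height = 256 rows at this mip level, Zoffset = 3.
    */
   GLuint rowstride = 2048, height = 256, zoffset = 3;
   GLuint slice_offset = rowstride * height * zoffset;   /* 1572864 bytes */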
Example #7
/**
 * Make a particular context current.
 *
 * \note This is in this file so that it can access dummyContext.
 */
static Bool
MakeContextCurrent(Display * dpy, GLXDrawable draw,
                   GLXDrawable read, GLXContext gc_user)
{
   struct glx_context *gc = (struct glx_context *) gc_user;
   struct glx_context *oldGC = __glXGetCurrentContext();
   int ret = Success;

   /* Make sure that the new context has a nonzero ID.  In the request,
    * a zero context ID is used only to mean that we bind to no current
    * context.
    */
   if ((gc != NULL) && (gc->xid == None)) {
      return GL_FALSE;
   }

   if (gc == NULL && (draw != None || read != None)) {
      __glXGenerateError(dpy, gc, (draw != None) ? draw : read,
                         BadMatch, X_GLXMakeContextCurrent);
      return False;
   }
   if (gc != NULL && (draw == None || read == None)) {
      __glXGenerateError(dpy, gc, None, BadMatch, X_GLXMakeContextCurrent);
      return False;
   }

   _glapi_check_multithread();

   if (gc != NULL && gc->thread_id != 0 && gc->thread_id != _glthread_GetID()) {
      __glXGenerateError(dpy, gc, gc->xid,
                         BadAccess, X_GLXMakeContextCurrent);
      return False;
   }

   if (oldGC == gc &&
       gc->currentDrawable == draw && gc->currentReadable == read)
      return True;

   if (oldGC != &dummyContext) {
      oldGC->vtable->unbind(oldGC, gc);
      oldGC->currentDpy = 0;
      oldGC->currentDrawable = None;
      oldGC->currentReadable = None;
      oldGC->thread_id = 0;
   }

   if (gc) {
      gc->currentDpy = dpy;
      gc->currentDrawable = draw;
      gc->currentReadable = read;
      gc->thread_id = _glthread_GetID();
      __glXSetCurrentContext(gc);
      ret = gc->vtable->bind(gc, oldGC, draw, read);
   } else {
      __glXSetCurrentContextNull();
   }

   if (oldGC != &dummyContext && oldGC->xid == None && oldGC != gc) {
      /* We are switching away from a context that was
       * previously destroyed, so we need to free the memory
       * for the old handle. */
      oldGC->vtable->destroy(oldGC);
   }

   if (ret) {
      __glXGenerateError(dpy, gc, None, ret, X_GLXMakeContextCurrent);
      return GL_FALSE;
   }

   return GL_TRUE;
}
Example #8
/**
 * Make a particular context current.
 *
 * \note This is in this file so that it can access dummyContext.
 */
static Bool
MakeContextCurrent(Display * dpy, GLXDrawable draw,
                   GLXDrawable read, GLXContext gc_user)
{
   struct glx_context *gc = (struct glx_context *) gc_user;
   struct glx_context *oldGC = __glXGetCurrentContext();

   /* XXX: If this is left out, then libGL ends up not having this
    * symbol, and drivers using it fail to load.  Compare the
    * implementation of this symbol to _glapi_noop_enable_warnings(),
    * though, which gets into the library despite no callers, the same
    * prototypes, and the same compile flags to the files containing
    * them.  Moving the definition to glapi_nop.c gets it into the
    * library, though.
    */
   (void)_glthread_GetID();

   /* Make sure that the new context has a nonzero ID.  In the request,
    * a zero context ID is used only to mean that we bind to no current
    * context.
    */
   if ((gc != NULL) && (gc->xid == None)) {
      return GL_FALSE;
   }

   if (gc == NULL && (draw != None || read != None)) {
      __glXGenerateError(dpy, (draw != None) ? draw : read,
                         BadMatch, X_GLXMakeContextCurrent);
      return False;
   }
   if (gc != NULL && (draw == None || read == None)) {
      __glXGenerateError(dpy, None, BadMatch, X_GLXMakeContextCurrent);
      return False;
   }

   _glapi_check_multithread();

   __glXLock();
   if (oldGC == gc &&
       gc->currentDrawable == draw && gc->currentReadable == read) {
      __glXUnlock();
      return True;
   }

   if (oldGC != &dummyContext) {
      if (--oldGC->thread_refcount == 0) {
	 oldGC->vtable->unbind(oldGC, gc);
	 oldGC->currentDpy = 0;
      }
   }

   if (gc) {
      /* Attempt to bind the context.  We do this before mucking with
       * gc and __glXSetCurrentContext to properly handle our state in
       * case of an error.
       *
       * If an error occurs, set the Null context since we've already
       * blown away our old context.  The caller is responsible for
       * figuring out how to handle setting a valid context.
       */
      if (gc->vtable->bind(gc, oldGC, draw, read) != Success) {
         __glXSetCurrentContextNull();
         __glXUnlock();
         __glXGenerateError(dpy, None, GLXBadContext, X_GLXMakeContextCurrent);
         return GL_FALSE;
      }

      if (gc->thread_refcount == 0) {
         gc->currentDpy = dpy;
         gc->currentDrawable = draw;
         gc->currentReadable = read;
      }
      gc->thread_refcount++;
      __glXSetCurrentContext(gc);
   } else {
      __glXSetCurrentContextNull();
   }

   if (oldGC->thread_refcount == 0 && oldGC != &dummyContext && oldGC->xid == None) {
      /* We are switching away from a context that was
       * previously destroyed, so we need to free the memory
       * for the old handle. */
      oldGC->vtable->destroy(oldGC);
   }

   __glXUnlock();

   return GL_TRUE;
}
Example #9
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc.
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(GLcontext * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct gl_texture_image *newImage
      = att->Texture->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image;
   GLuint imageOffset;

   (void) fb;

   ASSERT(newImage);

   if (!irb) {
      irb = intel_wrap_texture(ctx, newImage);
      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _mesa_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!intel_update_wrapper(ctx, irb, newImage)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _mesa_render_texture(ctx, fb, att);
       return;
   }

   DBG("Begin render texture tid %x tex=%u w=%d h=%d refcount=%d\n",
       _glthread_GetID(),
       att->Texture->Name, newImage->Width, newImage->Height,
       irb->Base.RefCount);

   /* point the renderbuffer's region to the texture image region */
   intel_image = intel_texture_image(newImage);
   if (irb->region != intel_image->mt->region) {
      if (irb->region)
	 intel_region_release(&irb->region);
      intel_region_reference(&irb->region, intel_image->mt->region);
   }

   /* compute offset of the particular 2D image within the texture region */
   imageOffset = intel_miptree_image_offset(intel_image->mt,
                                            att->CubeMapFace,
                                            att->TextureLevel);

   if (att->Texture->Target == GL_TEXTURE_3D) {
      const GLuint *offsets = intel_miptree_depth_offsets(intel_image->mt,
                                                          att->TextureLevel);
      imageOffset += offsets[att->Zoffset];
   }

   /* store that offset in the region */
   intel_image->mt->region->draw_offset = imageOffset;

   /* update drawing region, etc */
   intel_draw_buffer(ctx, fb);
}