Example #1
DECLEXPORT(EGLint) eglGetError(void)
{
    struct VBEGLTLS *pTls = getTls();

    if (pTls)
        return pTls->cErr;
    return EGL_NOT_INITIALIZED;
}
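
Every VirtualBox snippet on this page reaches its per-thread state through getTls(), which is not shown here. The following is a minimal sketch of how it might lazily allocate the struct VBEGLTLS block with IPRT's TLS API; the allocation and initialisation policy is an assumption, not the actual VirtualBox implementation:

static RTTLS g_tls = NIL_RTTLS; /* assumed to be created during init */

static struct VBEGLTLS *getTls(void)
{
    struct VBEGLTLS *pTls = (struct VBEGLTLS *)RTTlsGet(g_tls);

    if (pTls)
        return pTls;
    /* First call on this thread: allocate a zeroed block and register it. */
    pTls = (struct VBEGLTLS *)RTMemAllocZ(sizeof(*pTls));
    if (!pTls)
        return NULL;
    pTls->cErr = EGL_SUCCESS;
    if (RT_FAILURE(RTTlsSet(g_tls, pTls)))
    {
        RTMemFree(pTls);
        return NULL;
    }
    return pTls;
}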
Example #2
static EGLBoolean setEGLError(EGLint cErr)
{
    struct VBEGLTLS *pTls = getTls();

    if (pTls)
        pTls->cErr = cErr;
    return EGL_FALSE;
}
Example #3
DECLEXPORT(EGLContext) eglGetCurrentContext(void)
{
    struct VBEGLTLS *pTls = getTls();

    if (!VALID_PTR(pTls))
        return EGL_NO_CONTEXT;
    clearEGLError();
    return pTls->hCurrent;
}
Example #4
static EGLBoolean clearEGLError(void)
{
    struct VBEGLTLS *pTls = getTls();

    if (!VALID_PTR(pTls))
        return EGL_FALSE;
    pTls->cErr = EGL_SUCCESS;
    return EGL_TRUE;
}
Example #5
DECLEXPORT(EGLDisplay) eglGetCurrentDisplay(void)
{
    struct VBEGLTLS *pTls = getTls();

    if (!VALID_PTR(pTls))
        return EGL_NO_DISPLAY;
    clearEGLError();
    return pTls->hCurrentDisplay;
}
Example #6
DECLEXPORT(EGLBoolean) eglReleaseThread(void)
{
    struct VBEGLTLS *pTls = getTls();

    if (!pTls)
        return EGL_TRUE;
    RTMemFree(pTls);
    RTTlsSet(g_tls, NULL);
    return EGL_TRUE;
}
Example #7
DECLEXPORT(EGLBoolean) eglReleaseThread(void)
{
    struct VBEGLTLS *pTls = getTls();

    if (!pTls)
        return EGL_TRUE;
    free(pTls);
    /* POSIX only lists ENOMEM for associating a non-NULL value with the
     * key, so clearing the slot to NULL should not fail; the result is
     * ignored here in any case. */
    pthread_setspecific(g_tls, NULL);
    return EGL_TRUE;
}
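
The pthread variant above only reclaims the block when the application itself calls eglReleaseThread(). Registering a destructor with the key also covers threads that exit without doing so; a sketch of the one-time setup such code typically pairs with (initTlsKey and the pthread_once wiring are illustrative, not taken from the source):

static pthread_key_t g_tls;
static pthread_once_t g_tlsOnce = PTHREAD_ONCE_INIT;

static void initTlsKey(void)
{
    /* free() as the destructor reclaims the per-thread block even when a
     * thread terminates without calling eglReleaseThread(). */
    pthread_key_create(&g_tls, free);
}

/* Callers would run pthread_once(&g_tlsOnce, initTlsKey) before the
 * first getTls() on any thread. */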
Example #8
void
GlobalRcu::
exit(size_t epoch)
{
    // Ensures that all reads complete before we decrement the epoch
    // counter. Unfortunately there's no release-style ordering for reads,
    // so we need a full barrier instead. Sucky, but that's life.
    atomic_thread_fence(memory_order_seq_cst);

    getTls()[epoch & 1].count--;
}
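
exit() above, enter() (Example #11) and defer() (Example #14) all index the thread-local state with (epoch & 1), i.e. two slots that alternate between consecutive epochs. A hypothetical shape for that state, inferred purely from the call sites:

// Assumed layout, not the library's declared types: getTls() would return
// this thread's two-entry array, indexed by (epoch & 1). The gc thread
// reads count from other threads, which is why enter()/exit() fence
// around their updates.
struct Epoch
{
    size_t count;            // read-side sections currently in this epoch
    List<DeferFn> deferList; // push() takes a ListNode<DeferFn>*, as in
                             // Example #14
};

Epoch* getTls();             // assumed signature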
Example #9
RTCError* State::error()
{
  RTCError* stored_error = (RTCError*) getTls(thread_error);
  if (stored_error == nullptr) {
    Lock<MutexSys> lock(errors_mutex);
    stored_error = new RTCError(RTC_NO_ERROR);
    thread_errors.push_back(stored_error);
    setTls(thread_error,stored_error);
  }
  return stored_error;
}
Example #10
RTCError* getThreadError()
{
  RTCError* stored_error = (RTCError*) getTls(g_error);
  if (stored_error == NULL) {
    Lock<MutexSys> lock(g_errors_mutex);
    stored_error = new RTCError(RTC_NO_ERROR);
    g_errors.push_back(stored_error);
    setTls(g_error,stored_error);
  }
  return stored_error;
}
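
In both Embree variants the mutex only guards the global vector that records every per-thread allocation, presumably so the RTCError objects can be freed at shutdown; after the first call the TLS slot itself is touched without contention. A hypothetical pair of call sites with illustrative names (keeping the first error and clearing it on read match rtcGetError-style semantics, but are assumptions here):

void setThreadError(RTCError error)
{
  RTCError* stored = getThreadError();
  if (*stored == RTC_NO_ERROR)
    *stored = error;        // keep the first error until it is read
}

RTCError readAndClearThreadError()
{
  RTCError* stored = getThreadError();
  RTCError error = *stored;
  *stored = RTC_NO_ERROR;   // reading the error clears the slot
  return error;
}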
Example #11
size_t
GlobalRcu::
enter()
{
    while (true) {
        size_t epoch = gRcu.epoch;
        getTls()[epoch & 1].count++;

        // Prevents reads from taking place before we increment the epoch
        // counter.
        atomic_thread_fence(memory_order_acquire);

        /* Fun scenario that we need to guard against:

           1) Our thread reads gRcu.epoch E and gets pre-empted.
           2) gRcu.epoch is moved forward such that epoch E is available for gc.
           3) First pass of the gc thread finds the epoch vacated.
           4) Our thread wakes up and increments epoch E and exits.
           5) Another thread increments epoch E+1 and exits.
           6) Assuming no asserts, gc thread moves gRcu.epoch forward.

           This is an issue because our first thread essentially entered E+2
           even though a later thread entered epoch E+1. This means our first
           thread in E+2 will not be taken into account while gc-ing the epoch
           E+1 even though it entered before the second thread that is in E+1.
           In a nutshell, this breaks the RCU guarantee which is bad(tm).

           The fix is quite simple: make sure that gRcu.epoch hasn't been moved
           forward before exiting. While there are probably cleaner solutions,
           the ones I can think of require the introduction of a CAS which would
           limit the scalability of GlobalRcu. Also, gRcu.epoch shouldn't be
           moved forward too often (every 1ms is reasonable enough) so this
           branch should fail very rarely.
        */
        if ((epoch & 1) == (gRcu.epoch & 1)) return epoch;

        getTls()[epoch & 1].count--;
    }
}
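
Since enter() returns the epoch that must later be handed back to exit(), the pair lends itself to an RAII wrapper. A hypothetical guard, not part of the listed source, that keeps read-side critical sections exception-safe:

// Illustrative RAII guard pairing GlobalRcu::enter()/exit().
struct RcuGuard
{
    GlobalRcu& rcu;
    size_t epoch;

    explicit RcuGuard(GlobalRcu& r) : rcu(r), epoch(r.enter()) {}
    ~RcuGuard() { rcu.exit(epoch); }

    RcuGuard(const RcuGuard&) = delete;
    RcuGuard& operator=(const RcuGuard&) = delete;
};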
Example #12
DECLEXPORT(EGLSurface) eglGetCurrentSurface(EGLint cOp)
{
    struct VBEGLTLS *pTls = getTls();

    if (!VALID_PTR(pTls))
        return EGL_NO_SURFACE;
    clearEGLError();
    switch (cOp)
    {
        case EGL_DRAW:
            return pTls->hCurrentDraw;
        case EGL_READ:
            return pTls->hCurrentRead;
        default:
            setEGLError(EGL_BAD_PARAMETER);
            return EGL_NO_SURFACE;
    }
}
Example #13
DECLEXPORT(EGLBoolean) eglMakeCurrent(EGLDisplay hDisplay, EGLSurface hDraw, EGLSurface hRead, EGLContext hContext)
{
    Display *pDisplay = (Display *)hDisplay;
    GLXDrawable hGLXDraw = hDraw == EGL_NO_SURFACE ? None : (GLXDrawable)hDraw & ~VBEGL_ANY_SURFACE;
    GLXDrawable hGLXRead = hRead == EGL_NO_SURFACE ? None : (GLXDrawable)hRead & ~VBEGL_ANY_SURFACE;
    GLXContext hGLXContext = hContext == EGL_NO_CONTEXT ? None : (GLXContext)hContext;
    struct VBEGLTLS *pTls = getTls();

    if (!VALID_PTR(hDisplay) || !VALID_PTR(pTls))
        return setEGLError(EGL_NOT_INITIALIZED);
    if (glXMakeContextCurrent(pDisplay, hGLXDraw, hGLXRead, hGLXContext))
    {
        pTls->hCurrent = hContext;
        pTls->hCurrentDraw = hDraw;
        pTls->hCurrentRead = hRead;
        return clearEGLError();
    }
    else
        return setEGLError(EGL_BAD_MATCH);
}
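
A usage sketch tying Examples #1, #3, #12 and #13 together: after a successful eglMakeCurrent() the per-thread cache answers the query functions without another GLX round trip. The wrapper function and its parameters are illustrative:

static void exampleMakeCurrent(EGLDisplay hDisplay, EGLSurface hSurface, EGLContext hContext)
{
    if (eglMakeCurrent(hDisplay, hSurface, hSurface, hContext))
    {
        EGLContext hCur  = eglGetCurrentContext();         /* == hContext */
        EGLSurface hDraw = eglGetCurrentSurface(EGL_DRAW); /* == hSurface */
        (void)hCur; (void)hDraw;
    }
    else
    {
        EGLint cErr = eglGetError(); /* EGL_BAD_MATCH or EGL_NOT_INITIALIZED */
        (void)cErr;
    }
}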
Example #14
void
GlobalRcu::
defer(ListNode<DeferFn>* node)
{
    getTls()[gRcu.epoch & 1].deferList.push(node);
}
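
A hypothetical call site for defer(): reclamation is queued on the current epoch's list and runs only once every reader that could still see the object has left. This assumes DeferFn is a std::function<void()>-like callable and that ListNode can be constructed from its payload; both are inferences from the snippet, not the library's documented API:

// Illustrative only: the ListNode<DeferFn> constructor shown is assumed.
void retire(GlobalRcu& rcu, MyObject* obj)
{
    auto* node = new ListNode<DeferFn>([obj] { delete obj; });
    rcu.defer(node); // executed after the epoch is vacated by all readers
}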