Example no. 1
/* Used internally to wait for one object; also serves as a shortcut for
   waiting on events and semaphores. */
static int _DkObjectWaitOne (PAL_HANDLE handle, PAL_NUM timeout)
{
    /* Only handles that carry a file descriptor (or an eventfd) take this
       path; events and semaphores skip it. */
    if (HANDLE_HDR(handle)->flags & HAS_FDS) {
        struct pollfd fds[MAX_FDS];
        int off[MAX_FDS];
        int nfds = 0;
        for (int i = 0 ; i < MAX_FDS ; i++) {
            int events = 0;

            if ((HANDLE_HDR(handle)->flags & RFD(i)) &&
                !(HANDLE_HDR(handle)->flags & ERROR(i)))
                events |= POLLIN;

            if ((HANDLE_HDR(handle)->flags & WFD(i)) &&
                !(HANDLE_HDR(handle)->flags & WRITEABLE(i)) &&
                !(HANDLE_HDR(handle)->flags & ERROR(i)))
                events |= POLLOUT;

            if (events) {
                fds[nfds].fd = handle->generic.fds[i];
                fds[nfds].events = events|POLLHUP|POLLERR;
                fds[nfds].revents = 0;
                off[nfds] = i;
                nfds++;
            }
        }

        if (!nfds)
            return -PAL_ERROR_TRYAGAIN;

        int64_t waittime = timeout;
        int ret = ocall_poll(fds, nfds, timeout != NO_TIMEOUT ? &waittime : NULL);
        if (IS_ERR(ret))
            return unix_to_pal_error(ERRNO(ret));

        if (!ret)
            return -PAL_ERROR_TRYAGAIN;

        for (int i = 0 ; i < nfds ; i++) {
            if (!fds[i].revents)
                continue;
            if (fds[i].revents & POLLOUT)
                HANDLE_HDR(handle)->flags |= WRITEABLE(off[i]);
            if (fds[i].revents & (POLLHUP|POLLERR))
                HANDLE_HDR(handle)->flags |= ERROR(off[i]);
        }

        return 0;
    }

    const struct handle_ops * ops = HANDLE_OPS(handle);

    if (!ops || !ops->wait)
        return -PAL_ERROR_NOTSUPPORT;

    return ops->wait(handle, timeout);
}
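
The loop above treats the handle header's flags word as a packed bit field, with one bit per fd slot for each of the read-polled, write-polled, known-writeable, and error states. A minimal sketch of how such masks could be laid out follows; the actual macro values live in the PAL headers and may differ.

/* Hypothetical layout of the per-fd flag bits used above; the real
 * definitions in the PAL headers may differ. Each handle carries up to
 * MAX_FDS descriptor slots, and each slot gets one bit per property. */
#define MAX_FDS      3
#define RFD(n)       (1u << (0 * MAX_FDS + (n))) /* slot n polled for reading   */
#define WFD(n)       (1u << (1 * MAX_FDS + (n))) /* slot n polled for writing   */
#define WRITEABLE(n) (1u << (2 * MAX_FDS + (n))) /* slot n known to be writable */
#define ERROR(n)     (1u << (3 * MAX_FDS + (n))) /* slot n saw POLLHUP/POLLERR  */
#define HAS_FDS      ((1u << (2 * MAX_FDS)) - 1) /* any RFD()/WFD() bit is set  */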
Example no. 2
int _DkMutexLock (struct mutex_handle * mut)
{
    int i, c = 0;
    int ret;
    struct atomic_int * m = &mut->value;

    /* Spin and try to take lock */
    for (i = 0; i < MUTEX_SPINLOCK_TIMES; i++) {
        c = atomic_dec_and_test(m);
        if (c)
            goto success;
        cpu_relax();
    }

    /* The lock is now contended */

    while (!c) {
        int val = atomic_read(m);
        if (val == 1)
            goto again;

        ret = INLINE_SYSCALL(futex, 6, m, FUTEX_WAIT, val, NULL, NULL, 0);

        if (IS_ERR(ret) &&
            ERRNO(ret) != EWOULDBLOCK &&
            ERRNO(ret) != EINTR) {
            ret = unix_to_pal_error(ERRNO(ret));
            goto out;
        }

#ifdef DEBUG_MUTEX
        if (IS_ERR(ret))
            printf("mutex held by thread %d\n", mut->owner);
#endif

again:
        /* Upon wakeup, we still need to check whether the mutex was released
         * or someone else grabbed it first.
         * If atomic_dec_and_test() returns true (i.e., the old value of m was
         * 1, meaning unlocked), we own the lock and exit the loop. Else, we
         * sleep again (through a futex call).
         */
        c = atomic_dec_and_test(m);
    }

success:
#ifdef DEBUG_MUTEX
    mut->owner = INLINE_SYSCALL(gettid, 0);
#endif
    ret = 0;
out:
    return ret;
}
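
For context, here is a minimal sketch of the matching unlock path, assuming the same value convention as the lock above (1 = unlocked, <= 0 = locked or contended) and that atomic_set() exists alongside atomic_read()/atomic_dec_and_test(); this is not the verbatim PAL implementation.

int _DkMutexUnlock (struct mutex_handle * mut)
{
    struct atomic_int * m = &mut->value;

#ifdef DEBUG_MUTEX
    mut->owner = 0;
#endif

    /* Mark the mutex unlocked, then wake one sleeper. Waking
     * unconditionally is simpler than tracking contention exactly and
     * costs at most one spurious futex syscall. */
    atomic_set(m, 1);
    INLINE_SYSCALL(futex, 6, m, FUTEX_WAKE, 1, NULL, NULL, 0);
    return 0;
}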
Example no. 3
int _DkVirtualMemoryAlloc (void ** paddr, int size, int alloc_type,
                           int prot)
{
    void * addr = *paddr, * mem = addr;

    int flags = HOST_FLAGS(alloc_type, prot|PAL_PROT_WRITECOPY);
    prot = HOST_PROT(prot);

    /* The memory should have MAP_PRIVATE and MAP_ANONYMOUS */
    flags |= MAP_ANONYMOUS|(addr ? MAP_FIXED : 0);
    mem = (void *) ARCH_MMAP(addr, size, prot, flags, -1, 0);

    if (IS_ERR_P(mem))
        return unix_to_pal_error(ERRNO_P(mem));

    *paddr = mem;
    return 0;
}
Example no. 4
int _DkMutexLockTimeout (struct mutex_handle * m, PAL_NUM timeout)
{
    int i, ret = 0;
#ifdef DEBUG_MUTEX
    int tid = INLINE_SYSCALL(gettid, 0);
#endif
    /* If this is a trylock-style call, break more quickly. */
    int iterations = (timeout == 0) ? 1 : MUTEX_SPINLOCK_TIMES;

    /* Spin and try to take lock. Ignore any contribution this makes toward
     * the timeout. */
    for (i = 0; i < iterations; i++) {
        if (MUTEX_UNLOCKED == cmpxchg(&m->locked, MUTEX_UNLOCKED, MUTEX_LOCKED))
            goto success;
        CPU_RELAX();
    }

    if (timeout == 0) {
        ret = -PAL_ERROR_TRYAGAIN;
        goto out;
    }

    // Bump up the waiters count; we are probably going to block
    atomic_inc(&m->nwaiters);

    while (MUTEX_LOCKED == cmpxchg(&m->locked, MUTEX_UNLOCKED, MUTEX_LOCKED)) {
        struct timespec waittime, *waittimep = NULL;
        if (timeout != NO_TIMEOUT) {
            long sec = timeout / 1000000;
            long microsec = timeout - (sec * 1000000);
            waittime.tv_sec = sec;
            waittime.tv_nsec = microsec * 1000;
            waittimep = &waittime;
        }

        ret = INLINE_SYSCALL(futex, 6, m, FUTEX_WAIT, MUTEX_LOCKED, waittimep, NULL, 0);

        if (IS_ERR(ret)) {
            if (ERRNO(ret) == EWOULDBLOCK) {
                if (timeout != NO_TIMEOUT) {
                    ret = -PAL_ERROR_TRYAGAIN;
                    atomic_dec(&m->nwaiters);
                    goto out;
                }
            } else {
#ifdef DEBUG_MUTEX
                printf("futex failed (err = %d)\n", ERRNO(ret));
#endif
                ret = unix_to_pal_error(ERRNO(ret));
                atomic_dec(&m->nwaiters);
                goto out;
            }
        }
    }

    atomic_dec(&m->nwaiters);

success:
#ifdef DEBUG_MUTEX
    m->owner = tid;
#endif
    ret = 0;
out:

#ifdef DEBUG_MUTEX
    if (ret < 0)
        printf("mutex failed (%s, tid = %d)\n", PAL_STRERROR(ret), tid);
#endif
    return ret;
}
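
The wake side of this cmpxchg-based variant can skip the futex syscall entirely on the fast path by consulting the waiter count. A sketch under the same conventions follows; MB() is assumed to be this codebase's full memory barrier, and this is not the verbatim implementation.

int _DkMutexUnlock (struct mutex_handle * m)
{
#ifdef DEBUG_MUTEX
    m->owner = 0;
#endif

    m->locked = MUTEX_UNLOCKED;
    /* The store to `locked` must be globally visible before we read
     * nwaiters, or a waiter that blocks concurrently could be missed. */
    MB();

    /* Only pay for the futex syscall if someone may be sleeping. */
    if (atomic_read(&m->nwaiters))
        INLINE_SYSCALL(futex, 6, m, FUTEX_WAKE, 1, NULL, NULL, 0);

    return 0;
}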
Example no. 5
int _DkVirtualMemoryProtect (void * addr, int size, int prot)
{
    int ret = INLINE_SYSCALL(mprotect, 3, addr, size, HOST_PROT(prot));

    return IS_ERR(ret) ? unix_to_pal_error(ERRNO(ret)) : 0;
}
Example no. 6
int _DkVirtualMemoryFree (void * addr, int size)
{
    int ret = INLINE_SYSCALL(munmap, 2, addr, size);

    return IS_ERR(ret) ? unix_to_pal_error(ERRNO(ret)) : 0;
}
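
A minimal usage sketch tying _DkVirtualMemoryAlloc, _DkVirtualMemoryProtect, and _DkVirtualMemoryFree together: map an anonymous read-write region at a host-chosen address, drop write permission, then unmap. The PAL_PROT_* names, the size, and the alloc_type of 0 are illustrative assumptions.

static int demo_vm_lifecycle (void)
{
    void * addr = NULL;    /* NULL lets the host pick the address */
    int ret = _DkVirtualMemoryAlloc(&addr, 0x4000, 0,
                                    PAL_PROT_READ|PAL_PROT_WRITE);
    if (ret < 0)
        return ret;

    /* ... populate the region ... */

    ret = _DkVirtualMemoryProtect(addr, 0x4000, PAL_PROT_READ);
    if (ret < 0)
        return ret;

    return _DkVirtualMemoryFree(addr, 0x4000);
}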
Example no. 7
int _DkMutexLockTimeout (struct mutex_handle * mut, int timeout)
{
    int i, c = 0;

    if (timeout == -1)
        return _DkMutexLock(mut);

    struct atomic_int * m = &mut->value;

    /* Spin and try to take lock */
    for (i = 0 ; i < MUTEX_SPINLOCK_TIMES ; i++)
    {
        c = atomic_dec_and_test(m);
        if (c)
            goto success;
        CPU_RELAX();
    }

    /* The lock is now contended */

    int ret;

    if (timeout == 0) {
        ret = c ? 0 : -PAL_ERROR_TRYAGAIN;
        goto out;
    }

    while (!c) {
        int val = atomic_read(m);
        if (val == 1)
            goto again;

        struct timespec waittime;
        long sec = timeout / 1000000;
        long microsec = timeout - (sec * 1000000);
        waittime.tv_sec = sec;
        waittime.tv_nsec = microsec * 1000;

        ret = INLINE_SYSCALL(_umtx_op, 5, m, UMTX_OP_WAIT_UINT, val,
                             NULL, &waittime);

        if (IS_ERR(ret) && ERRNO(ret) != EWOULDBLOCK) {
            ret = unix_to_pal_error(ERRNO(ret));
            goto out;
        }

#ifdef DEBUG_MUTEX
        if (IS_ERR(ret))
            printf("mutex held by thread %d\n", mut->owner);
#endif

again:
        /* Upon wakeup, we still need to check whether the mutex was released
         * or someone else grabbed it first.
         * If atomic_dec_and_test() returns true (i.e., the old value of m was
         * 1, meaning unlocked), we own the lock and exit the loop. Else, we
         * sleep again (through a futex call).
         */
        c = atomic_dec_and_test(m);
    }

success:
#ifdef DEBUG_MUTEX
    mut->owner = INLINE_SYSCALL(gettid, 0);
#endif
    ret = 0;
out:
    return ret;
}
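
On FreeBSD the wake counterpart also goes through _umtx_op. A sketch assuming the same value convention as the lock path (1 = unlocked), not the verbatim implementation:

int _DkMutexUnlock (struct mutex_handle * mut)
{
    struct atomic_int * m = &mut->value;

#ifdef DEBUG_MUTEX
    mut->owner = 0;
#endif

    /* Mark unlocked, then wake one thread parked in UMTX_OP_WAIT_UINT. */
    atomic_set(m, 1);
    INLINE_SYSCALL(_umtx_op, 5, m, UMTX_OP_WAKE, 1, NULL, NULL);
    return 0;
}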
Example no. 8
/* Used internally to wait for one object; also serves as a shortcut for
   waiting on events and semaphores. */
static int _DkObjectWaitOne (PAL_HANDLE handle, int timeout)
{
    /* Only handles that carry a file descriptor (or an eventfd) take this
       path; events and semaphores skip it. */
    if (handle->__in.flags & HAS_FDS) {
        struct pollfd fds[MAX_FDS];
        int off[MAX_FDS];
        int nfds = 0;
        for (int i = 0 ; i < MAX_FDS ; i++) {
            int events = 0;

            if ((handle->__in.flags & RFD(i)) &&
                !(handle->__in.flags & ERROR(i)))
                events |= POLLIN;

            if ((handle->__in.flags & WFD(i)) &&
                !(handle->__in.flags & WRITEABLE(i)) &&
                !(handle->__in.flags & ERROR(i)))
                events |= POLLOUT;

            if (events) {
                fds[nfds].fd = handle->__in.fds[i];
                fds[nfds].events = events|POLLHUP|POLLERR;
                fds[nfds].revents = 0;
                off[nfds] = i;
                nfds++;
            }
        }

        if (!nfds)
            return -PAL_ERROR_TRYAGAIN;

        int ret = INLINE_SYSCALL(poll, 3, fds, nfds,
                                 timeout ? timeout : -1);

        if (IS_ERR(ret))
            switch (ERRNO(ret)) {
                case EINTR:
                    return -PAL_ERROR_INTERRUPTED;
                default:
                    return unix_to_pal_error(ERRNO(ret));
            }

        if (!ret)
            return -PAL_ERROR_TRYAGAIN;

        for (int i = 0 ; i < nfds ; i++) {
            if (!fds[i].revents)
                continue;
            if (fds[i].revents & POLLOUT)
                handle->__in.flags |= WRITEABLE(off[i]);
            if (fds[i].revents & (POLLHUP|POLLERR))
                handle->__in.flags |= ERROR(off[i]);
        }

        return 0;
    }

    const struct handle_ops * ops = HANDLE_OPS(handle);

    if (!ops || !ops->wait)
        return -PAL_ERROR_NOTSUPPORT;

    return ops->wait(handle, timeout);
}
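
For handles without host fds, the function falls back to a per-type wait callback. A hypothetical sketch of that dispatch surface follows; only the wait member is shown, and the real struct handle_ops carries many more entries.

struct handle_ops {
    /* ... open, read, write, close, and other callbacks ... */
    int (*wait) (PAL_HANDLE handle, int timeout);
};

/* e.g. an event handle could block on its own futex word here */
static int event_wait (PAL_HANDLE handle, int timeout);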
Example no. 9
/* _DkObjectsWaitAny for internal use. The function waits for any of the
   handles in the handle array; a timeout can be set for the wait. */
int _DkObjectsWaitAny (int count, PAL_HANDLE * handleArray, int timeout,
                       PAL_HANDLE * polled)
{
    if (count <= 0)
        return 0;

    if (count == 1) {
        *polled = handleArray[0];
        return _DkObjectWaitOne(handleArray[0], timeout);
    }

    int i, j, ret, maxfds = 0, nfds = 0;

    /* We do not allow polling on multiple synchronous objects; doing so
       would violate the division of labor between the PAL and the
       library OS. */
    for (i = 0 ; i < count ; i++) {
        PAL_HANDLE hdl = handleArray[i];

        if (!hdl)
            continue;

        if (!(hdl->__in.flags & HAS_FDS))
            return -PAL_ERROR_NOTSUPPORT;

        /* eliminate repeated entries */
        for (j = 0 ; j < i ; j++)
            if (hdl == handleArray[j])
                break;
        if (j == i) {
            for (j = 0 ; j < MAX_FDS ; j++)
                if (hdl->__in.flags & (RFD(j)|WFD(j)))
                    maxfds++;
        }
    }

    struct pollfd * fds = __alloca(sizeof(struct pollfd) * maxfds);
    PAL_HANDLE * hdls = __alloca(sizeof(PAL_HANDLE) * maxfds);

    for (i = 0 ; i < count ; i++) {
        PAL_HANDLE hdl = handleArray[i];

        if (!hdl)
            continue;

        for (j = 0 ; j < i ; j++)
            if (hdl == handleArray[j])
                break;
        if (j < i)
            continue;

        for (j = 0 ; j < MAX_FDS ; j++) {
            int events = 0;

            if ((hdl->__in.flags & RFD(j)) &&
                !(hdl->__in.flags & ERROR(j)))
                events |= POLLIN;

            if ((hdl->__in.flags & WFD(j)) &&
                !(hdl->__in.flags & WRITEABLE(j)) &&
                !(hdl->__in.flags & ERROR(j)))
                events |= POLLOUT;

            if (events && hdl->__in.fds[j] != PAL_IDX_POISON) {
                fds[nfds].fd = hdl->__in.fds[j];
                fds[nfds].events = events|POLLHUP|POLLERR;
                fds[nfds].revents = 0;
                hdls[nfds] = hdl;
                nfds++;
            }
        }
    }

    if (!nfds)
        return -PAL_ERROR_TRYAGAIN;

    ret = INLINE_SYSCALL(poll, 3, fds, nfds, timeout ? timeout : -1);

    if (IS_ERR(ret))
        switch (ERRNO(ret)) {
            case EINTR:
                return -PAL_ERROR_INTERRUPTED;
            default:
                return unix_to_pal_error(ERRNO(ret));
        }

    if (!ret)
        return -PAL_ERROR_TRYAGAIN;

    PAL_HANDLE polled_hdl = NULL;

    for (i = 0 ; i < nfds ; i++) {
        if (!fds[i].revents)
            continue;

        PAL_HANDLE hdl = hdls[i];

        if (polled_hdl) {
            if (hdl != polled_hdl)
                continue;
        } else {
            polled_hdl = hdl;
        }

        for (j = 0 ; j < MAX_FDS ; j++)
            if ((hdl->__in.flags & (RFD(j)|WFD(j))) &&
                hdl->__in.fds[j] == fds[i].fd)
                break;

        if (j == MAX_FDS)
            continue;

        if (fds[i].revents & POLLOUT)
            hdl->__in.flags |= WRITEABLE(j);
        if (fds[i].revents & (POLLHUP|POLLERR))
            hdl->__in.flags |= ERROR(j);
    }

    *polled = polled_hdl;
    return polled_hdl ? 0 : -PAL_ERROR_TRYAGAIN;
}
Example no. 10
/* _DkObjectsWaitAny for internal use. The function waits for any of the
   handles in the handle array; a timeout can be set for the wait. */
int _DkObjectsWaitAny (int count, PAL_HANDLE * handleArray, PAL_NUM timeout,
                       PAL_HANDLE * polled)
{
    if (count <= 0)
        return 0;

    if (count == 1) {
        // It is possible to have NULL pointers in the handle array.
        // In this case, assume nothing is polled.
        if (!handleArray[0])
            return -PAL_ERROR_TRYAGAIN;

        int rv = _DkObjectWaitOne(handleArray[0], timeout);
        if (rv == 0)
            *polled = handleArray[0];
        return rv;
    }

    int i, j, ret, maxfds = 0, nfds = 0;

    /* We do not allow polling on multiple synchronous objects; doing so
       would violate the division of labor between the PAL and the
       library OS. */
    for (i = 0 ; i < count ; i++) {
        PAL_HANDLE hdl = handleArray[i];

        if (!hdl)
            continue;

        if (!(HANDLE_HDR(hdl)->flags & HAS_FDS))
            return -PAL_ERROR_NOTSUPPORT;

        /* eliminate repeated entries */
        for (j = 0 ; j < i ; j++)
            if (hdl == handleArray[j])
                break;
        if (j == i) {
            for (j = 0 ; j < MAX_FDS ; j++)
                if (HANDLE_HDR(hdl)->flags & (RFD(j)|WFD(j)))
                    maxfds++;
        }
    }

    struct pollfd * fds = __alloca(sizeof(struct pollfd) * maxfds);
    PAL_HANDLE * hdls = __alloca(sizeof(PAL_HANDLE) * maxfds);

    for (i = 0 ; i < count ; i++) {
        PAL_HANDLE hdl = handleArray[i];

        if (!hdl)
            continue;

        for (j = 0 ; j < i ; j++)
            if (hdl == handleArray[j])
                break;
        if (j < i)
            continue;

        for (j = 0 ; j < MAX_FDS ; j++) {
            int events = 0;

            if ((HANDLE_HDR(hdl)->flags & RFD(j)) &&
                !(HANDLE_HDR(hdl)->flags & ERROR(j)))
                events |= POLLIN;

            if ((HANDLE_HDR(hdl)->flags & WFD(j)) &&
                !(HANDLE_HDR(hdl)->flags & WRITEABLE(j)) &&
                !(HANDLE_HDR(hdl)->flags & ERROR(j)))
                events |= POLLOUT;

            if (events && hdl->generic.fds[j] != PAL_IDX_POISON) {
                fds[nfds].fd = hdl->generic.fds[j];
                fds[nfds].events = events|POLLHUP|POLLERR;
                fds[nfds].revents = 0;
                hdls[nfds] = hdl;
                nfds++;
            }
        }
    }

    if (!nfds)
        return -PAL_ERROR_TRYAGAIN;

    int64_t waittime = timeout;
    ret = ocall_poll(fds, nfds, timeout != NO_TIMEOUT ? &waittime : NULL);
    if (IS_ERR(ret))
        return unix_to_pal_error(ERRNO(ret));

    if (!ret)
        return -PAL_ERROR_TRYAGAIN;

    PAL_HANDLE polled_hdl = NULL;

    for (i = 0 ; i < nfds ; i++) {
        if (!fds[i].revents)
            continue;

        PAL_HANDLE hdl = hdls[i];

        if (polled_hdl) {
            if (hdl != polled_hdl)
                continue;
        } else {
            polled_hdl = hdl;
        }

        for (j = 0 ; j < MAX_FDS ; j++)
            if ((HANDLE_HDR(hdl)->flags & (RFD(j)|WFD(j))) &&
                hdl->generic.fds[j] == (PAL_IDX)fds[i].fd)
                break;

        if (j == MAX_FDS)
            continue;

        if (fds[i].revents & POLLOUT)
            HANDLE_HDR(hdl)->flags |= WRITEABLE(j);
        if (fds[i].revents & (POLLHUP|POLLERR))
            HANDLE_HDR(hdl)->flags |= ERROR(j);
    }

    *polled = polled_hdl;
    return polled_hdl ? 0 : -PAL_ERROR_TRYAGAIN;
}
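
A caller-side sketch of the multi-handle path: poll two stream handles until either reports events. The handle parameters are illustrative, and the timeout is assumed to be in microseconds, matching the sec/microsec conversion used by the lock paths above.

static void demo_wait_any (PAL_HANDLE pipe_hdl, PAL_HANDLE sock_hdl)
{
    PAL_HANDLE waits[2] = { pipe_hdl, sock_hdl };
    PAL_HANDLE ready = NULL;

    if (_DkObjectsWaitAny(2, waits, 1000000 /* 1 s, assumed usec */,
                          &ready) == 0 && ready) {
        /* `ready` reported events; its WRITEABLE()/ERROR() flag bits
         * were refreshed by the poll loop above. */
    }
}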