Example #1
1
bool ponyint_messageq_push(messageq_t* q, pony_msg_t* first, pony_msg_t* last)
{
  atomic_store_explicit(&last->next, NULL, memory_order_relaxed);

  // Without the fence below, the store to last->next above could be reordered after
  // the exchange on the head and after the store to prev->next done by the
  // next push, which would result in the pop incorrectly seeing the queue as
  // empty.
  // Also synchronise with the pop on prev->next.
  atomic_thread_fence(memory_order_release);

  pony_msg_t* prev = atomic_exchange_explicit(&q->head, last,
    memory_order_relaxed);

  bool was_empty = ((uintptr_t)prev & 1) != 0;
  prev = (pony_msg_t*)((uintptr_t)prev & ~(uintptr_t)1);

#ifdef USE_VALGRIND
  // Double fence with Valgrind since we need to have prev in scope for the
  // synchronisation annotation.
  ANNOTATE_HAPPENS_BEFORE(&prev->next);
  atomic_thread_fence(memory_order_release);
#endif
  atomic_store_explicit(&prev->next, first, memory_order_relaxed);

  return was_empty;
}
Example #2
0
void * ccsynch_delegate_or_lock(void* lock,
                                unsigned int messageSize) {
    CCSynchLock *l = (CCSynchLock*)lock;
    CCSynchLockNode *nextNode;
    CCSynchLockNode *curNode;
    ccsynchlock_initLocalIfNeeded();
    nextNode = ccsynchNextLocalNode;
    atomic_store_explicit(&nextNode->next, (uintptr_t)NULL, memory_order_relaxed);
    atomic_store_explicit(&nextNode->wait, 1, memory_order_relaxed);
    nextNode->completed = false;
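    //Swap our fresh node into the queue tail; the returned previous tail is the node we work on below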
    curNode = (CCSynchLockNode *)atomic_exchange_explicit(&l->tailPtr.value, (uintptr_t)nextNode, memory_order_release);
    curNode->messageSize = messageSize;

    curNode->requestFunction = NULL; //Forces helper to stop if it sees this

    atomic_store_explicit(&curNode->next, (uintptr_t)nextNode, memory_order_release);

    ccsynchNextLocalNode = curNode;
    if (atomic_load_explicit(&curNode->wait, memory_order_acquire) == 1){
        //Someone else has the lock; delegate
        return curNode->tempBuffer;
    }else{
        //We got the lock
        return NULL;
    }

}
Example #3
0
/*
 * Release a normal mutex.  The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static inline __always_inline void __pthread_normal_mutex_unlock(pthread_mutex_internal_t* mutex,
                                                                 uint16_t shared) {
    const uint16_t unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We use an atomic_exchange to release the lock. If the locked_contended
    // state is returned, some thread is waiting for the lock and we need to
    // wake one of them up.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    if (atomic_exchange_explicit(&mutex->state, unlocked,
                                 memory_order_release) == locked_contended) {
        // Wake up one waiting thread. We don't know which thread will be
        // woken or when it'll start executing -- futexes make no guarantees
        // here. There may not even be a thread waiting.
        //
        // The newly-woken thread will replace the unlocked state we just set above
        // with locked_contended state, which means that when it eventually releases
        // the mutex it will also call FUTEX_WAKE. This results in one extra wake
        // call whenever a lock is contended, but lets us avoid forgetting anyone
        // without requiring us to track the number of sleepers.
        //
        // It's possible for another thread to sneak in and grab the lock between
        // the exchange above and the wake call below. If the new thread is "slow"
        // and holds the lock for a while, we'll wake up a sleeper, which will swap
        // in locked_uncontended state and then go back to sleep since the lock is
        // still held. If the new thread is "fast", running to completion before
        // we call wake, the thread we eventually wake will find an unlocked mutex
        // and will execute. Either way we have correct behavior and nobody is
        // orphaned on the wait queue.
        __futex_wake_ex(&mutex->state, shared, 1);
    }
}
Example #4
0
/*
 * Lock a mutex of type NORMAL.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static inline __always_inline int __pthread_normal_mutex_lock(pthread_mutex_internal_t* mutex,
                                                              uint16_t shared,
                                                              bool use_realtime_clock,
                                                              const timespec* abs_timeout_or_null) {
    if (__predict_true(__pthread_normal_mutex_trylock(mutex, shared) == 0)) {
        return 0;
    }
    int result = check_timespec(abs_timeout_or_null, true);
    if (result != 0) {
        return result;
    }

    ScopedTrace trace("Contending for pthread mutex");

    const uint16_t unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const uint16_t locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We want to go to sleep until the mutex is available, which requires
    // promoting it to locked_contended. We need to swap in the new state
    // and then wait until somebody wakes us up.
    // An atomic_exchange is used to compete with other threads for the lock.
    // If it returns unlocked, we have acquired the lock, otherwise another
    // thread still holds the lock and we should wait again.
    // If the lock is acquired, an acquire fence is needed to make all memory
    // accesses made by other threads visible to the current thread.
    while (atomic_exchange_explicit(&mutex->state, locked_contended,
                                    memory_order_acquire) != unlocked) {
        if (__futex_wait_ex(&mutex->state, shared, locked_contended, use_realtime_clock,
                            abs_timeout_or_null) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
    }
    return 0;
}
Example #5
0
/* Test for consistency on sizes 1, 2, 4, 8, 16 and 32.  */
int
main ()
{
  test_struct c;

  atomic_store_explicit (&a, zero, memory_order_relaxed);
  if (memcmp (&a, &zero, size))
    abort ();

  c = atomic_exchange_explicit (&a, ones, memory_order_seq_cst);
  if (memcmp (&c, &zero, size))
    abort ();
  if (memcmp (&a, &ones, size))
    abort ();

  b = atomic_load_explicit (&a, memory_order_relaxed);
  if (memcmp (&b, &ones, size))
    abort ();

  if (!atomic_compare_exchange_strong_explicit (&a, &b, zero, memory_order_seq_cst, memory_order_acquire))
    abort ();
  if (memcmp (&a, &zero, size))
    abort ();

  if (atomic_compare_exchange_weak_explicit (&a, &b, ones, memory_order_seq_cst, memory_order_acquire))
    abort ();
  if (memcmp (&b, &zero, size))
    abort ();

  return 0;
}
Example #6
0
void LSQBaseDealloc(LSQBaseTypeRef self)
{
    if (self->data.dealloc != NULL)
    {
        self->data.dealloc(self->data.userdata);
    }
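    // Clear the stored userdata pointer now that the dealloc callback has run.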
    atomic_exchange_explicit(&self->data.userdata, NULL, memory_order_release);
    LSQAllocatorDealloc(self);
}
Example #7
0
void ponyint_mpmcq_push(mpmcq_t* q, void* data)
{
  mpmcq_node_t* node = POOL_ALLOC(mpmcq_node_t);
  atomic_store_explicit(&node->data, data, memory_order_relaxed);
  atomic_store_explicit(&node->next, NULL, memory_order_relaxed);

  mpmcq_node_t* prev = atomic_exchange_explicit(&q->head, node,
    memory_order_relaxed);
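  // The release store below links the previous node to the new one and
  // publishes the relaxed stores to node->data and node->next made above.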
  atomic_store_explicit(&prev->next, node, memory_order_release);
}
Example #8
0
int pthread_mutex_unlock(pthread_mutex_t* mutex_interface) {
#if !defined(__LP64__)
    // Some apps depend on being able to pass NULL as a mutex and get EINVAL
    // back. Don't need to worry about it for LP64 since the ABI is brand new,
    // but keep compatibility for LP32. http://b/19995172.
    if (mutex_interface == NULL) {
        return EINVAL;
    }
#endif

    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        __pthread_normal_mutex_unlock(mutex, shared);
        return 0;
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if ( tid != atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed) ) {
        return EPERM;
    }

    // If the counter is > 0, we can simply decrement it atomically.
    // Other threads can mutate the lower state bits (and only the lower state
    // bits); the decrement below only touches the counter bits, which we own.
    if (!MUTEX_COUNTER_BITS_IS_ZERO(old_state)) {
        // We still own the mutex, so a release fence is not needed.
        atomic_fetch_sub_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
        return 0;
    }

    // The counter is 0, so we're going to unlock the mutex by resetting its
    // state to unlocked. We need to perform an atomic_exchange in order to
    // read the current state, which will be locked_contended if there may be
    // waiters to wake.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    atomic_store_explicit(&mutex->owner_tid, 0, memory_order_relaxed);
    const uint16_t unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    old_state = atomic_exchange_explicit(&mutex->state, unlocked, memory_order_release);
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(old_state)) {
        __futex_wake_ex(&mutex->state, shared, 1);
    }

    return 0;
}
Example #9
0
bool ponyint_messageq_push(messageq_t* q, pony_msg_t* m)
{
  atomic_store_explicit(&m->next, NULL, memory_order_relaxed);

  pony_msg_t* prev = atomic_exchange_explicit(&q->head, m,
    memory_order_relaxed);
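  // Bit 0 of the returned head pointer is set when the queue was empty.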

  bool was_empty = ((uintptr_t)prev & 1) != 0;
  prev = (pony_msg_t*)((uintptr_t)prev & ~(uintptr_t)1);

  atomic_store_explicit(&prev->next, m, memory_order_release);

  return was_empty;
}
Example #10
0
void ccsynch_lock(void * lock) {
    CCSynchLock *l = (CCSynchLock*)lock;
    CCSynchLockNode *nextNode;
    CCSynchLockNode *curNode;
    ccsynchlock_initLocalIfNeeded();
    nextNode = ccsynchNextLocalNode;
    atomic_store_explicit(&nextNode->next, (uintptr_t)NULL, memory_order_relaxed);
    atomic_store_explicit(&nextNode->wait, 1, memory_order_relaxed);
    nextNode->completed = false;
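    //Swap our fresh node into the queue tail; the previous tail node becomes ours to spin on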
    curNode = (CCSynchLockNode *)atomic_exchange_explicit( &l->tailPtr.value, (uintptr_t)nextNode, memory_order_release);
    curNode->requestFunction = NULL;
    atomic_store_explicit(&curNode->next, (uintptr_t)nextNode, memory_order_release);
    ccsynchNextLocalNode = curNode;
    while (atomic_load_explicit(&curNode->wait, memory_order_acquire) == 1){
        thread_yield();
    }
}
Example #11
0
void ccsynch_delegate(void* lock,
                      void (*funPtr)(unsigned int, void *), 
                      unsigned int messageSize,
                      void * messageAddress) {
    CCSynchLock *l = (CCSynchLock*)lock;
    unsigned char * messageBuffer =  (unsigned char *) messageAddress;
    CCSynchLockNode *nextNode;
    CCSynchLockNode *curNode;
    CCSynchLockNode *tmpNode;
    void (*tmpFunPtr)(unsigned int, void *);
    CCSynchLockNode *tmpNodeNext;
    int counter = 0;
    ccsynchlock_initLocalIfNeeded();
    nextNode = ccsynchNextLocalNode;
    atomic_store_explicit(&nextNode->next, (uintptr_t)NULL, memory_order_relaxed);
    atomic_store_explicit(&nextNode->wait, 1, memory_order_relaxed);
    nextNode->completed = false;
    curNode = (CCSynchLockNode *)atomic_exchange_explicit(&l->tailPtr.value, (uintptr_t)nextNode, memory_order_release);
    curNode->buffer = messageBuffer;
    curNode->messageSize = messageSize;
    curNode->requestFunction = funPtr;
    atomic_store_explicit(&curNode->next, (uintptr_t)nextNode, memory_order_release);
    ccsynchNextLocalNode = curNode;
    while (atomic_load_explicit(&curNode->wait, memory_order_acquire) == 1){
        thread_yield();
    }
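    //Either a helper already executed our request (completed), or we now hold the lock and run it ourselves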
    if(curNode->completed==true){
        return;
    }else{
        funPtr(messageSize, messageBuffer);
    }
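    //We hold the lock: help execute queued requests until the hand-off limit is reached or a node with no request function is found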
    tmpNode = (CCSynchLockNode *)atomic_load_explicit(&curNode->next, memory_order_acquire);
    while ((tmpNodeNext=(CCSynchLockNode *)atomic_load_explicit(&tmpNode->next, memory_order_acquire)) != NULL && counter < CCSYNCH_HAND_OFF_LIMIT) {
        counter = counter + 1;
        tmpFunPtr = tmpNode->requestFunction;
        if(tmpFunPtr==NULL){
            break;
        }
        tmpFunPtr(tmpNode->messageSize, tmpNode->buffer);
        tmpNode->completed = true;
        atomic_store_explicit(&tmpNode->wait, 0, memory_order_release);
        tmpNode = tmpNodeNext;
    }
    atomic_store_explicit(&tmpNode->wait, 0, memory_order_release);
}
Example #12
0
void LSQBaseSetUserdata(LSQBaseTypeRef self, void* data)
{
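    // Atomically replace the userdata pointer; the previous value is discarded.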
    atomic_exchange_explicit(&self->data.userdata, data, memory_order_release);
}
Example #13
0
static int statsdWrite(struct timespec* ts, struct iovec* vec, size_t nr) {
    ssize_t ret;
    int sock;
    static const unsigned headerLength = 1;
    struct iovec newVec[nr + headerLength];
    android_log_header_t header;
    size_t i, payloadSize;

    sock = atomic_load(&statsdLoggerWrite.sock);
    if (sock < 0) {
        switch (sock) {
            case -ENOTCONN:
            case -ECONNREFUSED:
            case -ENOENT:
                break;
            default:
                return -EBADF;
        }
    }
    /*
     *  struct {
     *      // what we provide to socket
     *      android_log_header_t header;
     *      // caller provides
     *      union {
     *          struct {
     *              char     prio;
     *              char     payload[];
     *          } string;
     *          struct {
     *              uint32_t tag
     *              char     payload[];
     *          } binary;
     *      };
     *  };
     */

    header.tid = gettid();
    header.realtime.tv_sec = ts->tv_sec;
    header.realtime.tv_nsec = ts->tv_nsec;

    newVec[0].iov_base = (unsigned char*)&header;
    newVec[0].iov_len = sizeof(header);

    // If we dropped events before, try to tell statsd.
    if (sock >= 0) {
        int32_t snapshot = atomic_exchange_explicit(&dropped, 0, memory_order_relaxed);
        if (snapshot) {
            android_log_event_long_t buffer;
            header.id = LOG_ID_STATS;
            // store the last log error in the tag field. This tag field is not used by statsd.
            buffer.header.tag = htole32(atomic_load(&log_error));
            buffer.payload.type = EVENT_TYPE_LONG;
            // format:
            // |atom_tag|dropped_count|
            int64_t composed_long = atomic_load(&atom_tag);
            // Send 2 int32's via an int64.
            composed_long = ((composed_long << 32) | ((int64_t)snapshot));
            buffer.payload.data = htole64(composed_long);

            newVec[headerLength].iov_base = &buffer;
            newVec[headerLength].iov_len = sizeof(buffer);

            ret = TEMP_FAILURE_RETRY(writev(sock, newVec, 2));
            if (ret != (ssize_t)(sizeof(header) + sizeof(buffer))) {
                atomic_fetch_add_explicit(&dropped, snapshot, memory_order_relaxed);
            }
        }
    }

    header.id = LOG_ID_STATS;

    for (payloadSize = 0, i = headerLength; i < nr + headerLength; i++) {
        newVec[i].iov_base = vec[i - headerLength].iov_base;
        payloadSize += newVec[i].iov_len = vec[i - headerLength].iov_len;

        if (payloadSize > LOGGER_ENTRY_MAX_PAYLOAD) {
            newVec[i].iov_len -= payloadSize - LOGGER_ENTRY_MAX_PAYLOAD;
            if (newVec[i].iov_len) {
                ++i;
            }
            break;
        }
    }

    /*
     * The write below could be lost, but will never block.
     *
     * ENOTCONN occurs if statsd has died.
     * ENOENT occurs if statsd is not running and socket is missing.
     * ECONNREFUSED occurs if we can not reconnect to statsd.
     * EAGAIN occurs if statsd is overloaded.
     */
    if (sock < 0) {
        ret = sock;
    } else {
        ret = TEMP_FAILURE_RETRY(writev(sock, newVec, i));
        if (ret < 0) {
            ret = -errno;
        }
    }
    switch (ret) {
        case -ENOTCONN:
        case -ECONNREFUSED:
        case -ENOENT:
            if (statd_writer_trylock()) {
                return ret; /* in a signal handler? try again when less stressed
                             */
            }
            __statsdClose(ret);
            ret = statsdOpen();
            statsd_writer_init_unlock();

            if (ret < 0) {
                return ret;
            }

            ret = TEMP_FAILURE_RETRY(writev(atomic_load(&statsdLoggerWrite.sock), newVec, i));
            if (ret < 0) {
                ret = -errno;
            }
        /* FALLTHRU */
        default:
            break;
    }

    if (ret > (ssize_t)sizeof(header)) {
        ret -= sizeof(header);
    }

    return ret;
}
Example #14
0
static void statsdNoteDrop(int error, int tag) {
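    // Count the dropped message and remember the last error and atom tag so a later write can report them.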
    atomic_fetch_add_explicit(&dropped, 1, memory_order_relaxed);
    atomic_exchange_explicit(&log_error, error, memory_order_relaxed);
    atomic_exchange_explicit(&atom_tag, tag, memory_order_relaxed);
}
Example #15
0
unsigned vlc_timer_getoverrun (vlc_timer_t timer)
{
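    /* Atomically read and reset the overrun counter. */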
    return atomic_exchange_explicit (&timer->overruns, 0,
                                     memory_order_relaxed);
}
Example #16
0
TEST(stdatomic, atomic_exchange) {
  atomic_int i;
  atomic_store(&i, 123);
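  // atomic_exchange stores the new value and returns the previously stored one.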
  ASSERT_EQ(123, atomic_exchange(&i, 456));
  ASSERT_EQ(456, atomic_exchange_explicit(&i, 123, memory_order_relaxed));
}
Example #17
0
static int __write_to_log_daemon(log_id_t log_id, struct iovec *vec, size_t nr)
{
    ssize_t ret;
#if FAKE_LOG_DEVICE
    int log_fd;

    if (/*(int)log_id >= 0 &&*/ (int)log_id < (int)LOG_ID_MAX) {
        log_fd = log_fds[(int)log_id];
    } else {
        return -EBADF;
    }
    do {
        ret = fakeLogWritev(log_fd, vec, nr);
        if (ret < 0) {
            ret = -errno;
        }
    } while (ret == -EINTR);
#else
    static const unsigned header_length = 2;
    struct iovec newVec[nr + header_length];
    android_log_header_t header;
    android_pmsg_log_header_t pmsg_header;
    struct timespec ts;
    size_t i, payload_size;
    static uid_t last_uid = AID_ROOT; /* logd *always* starts up as AID_ROOT */
    static pid_t last_pid = (pid_t) -1;
    static atomic_int_fast32_t dropped;

    if (!nr) {
        return -EINVAL;
    }

    if (last_uid == AID_ROOT) { /* have we called to get the UID yet? */
        last_uid = getuid();
    }
    if (last_pid == (pid_t) -1) {
        last_pid = getpid();
    }
    /*
     *  struct {
     *      // what we provide to pstore
     *      android_pmsg_log_header_t pmsg_header;
     *      // what we provide to socket
     *      android_log_header_t header;
     *      // caller provides
     *      union {
     *          struct {
     *              char     prio;
     *              char     payload[];
     *          } string;
     *          struct {
     *              uint32_t tag
     *              char     payload[];
     *          } binary;
     *      };
     *  };
     */

    if (android_log_timestamp() == 'm') {
        clock_gettime(CLOCK_MONOTONIC, &ts);
    } else {
        clock_gettime(CLOCK_REALTIME, &ts);
    }

    pmsg_header.magic = LOGGER_MAGIC;
    pmsg_header.len = sizeof(pmsg_header) + sizeof(header);
    pmsg_header.uid = last_uid;
    pmsg_header.pid = last_pid;

    header.tid = gettid();
    header.realtime.tv_sec = ts.tv_sec;
    header.realtime.tv_nsec = ts.tv_nsec;

    newVec[0].iov_base   = (unsigned char *) &pmsg_header;
    newVec[0].iov_len    = sizeof(pmsg_header);
    newVec[1].iov_base   = (unsigned char *) &header;
    newVec[1].iov_len    = sizeof(header);

    if (logd_fd > 0) {
        int32_t snapshot = atomic_exchange_explicit(&dropped, 0, memory_order_relaxed);
        if (snapshot) {
            android_log_event_int_t buffer;

            header.id = LOG_ID_EVENTS;
            buffer.header.tag = htole32(LIBLOG_LOG_TAG);
            buffer.payload.type = EVENT_TYPE_INT;
            buffer.payload.data = htole32(snapshot);

            newVec[2].iov_base = &buffer;
            newVec[2].iov_len  = sizeof(buffer);

            ret = TEMP_FAILURE_RETRY(writev(logd_fd, newVec + 1, 2));
            if (ret != (ssize_t)(sizeof(header) + sizeof(buffer))) {
                atomic_fetch_add_explicit(&dropped, snapshot, memory_order_relaxed);
            }
        }
    }

    header.id = log_id;

    for (payload_size = 0, i = header_length; i < nr + header_length; i++) {
        newVec[i].iov_base = vec[i - header_length].iov_base;
        payload_size += newVec[i].iov_len = vec[i - header_length].iov_len;

        if (payload_size > LOGGER_ENTRY_MAX_PAYLOAD) {
            newVec[i].iov_len -= payload_size - LOGGER_ENTRY_MAX_PAYLOAD;
            if (newVec[i].iov_len) {
                ++i;
            }
            payload_size = LOGGER_ENTRY_MAX_PAYLOAD;
            break;
        }
    }
    pmsg_header.len += payload_size;

    if (pstore_fd >= 0) {
        TEMP_FAILURE_RETRY(writev(pstore_fd, newVec, i));
    }

    if (last_uid == AID_LOGD) { /* logd, after initialization and priv drop */
        /*
         * ignore log messages we send to ourself (logd).
         * Such log messages are often generated by libraries we depend on
         * which use standard Android logging.
         */
        return 0;
    }

    if (logd_fd < 0) {
        return -EBADF;
    }

    /*
     * The write below could be lost, but will never block.
     *
     * To logd, we drop the pmsg_header
     *
     * ENOTCONN occurs if logd dies.
     * EAGAIN occurs if logd is overloaded.
     */
    ret = TEMP_FAILURE_RETRY(writev(logd_fd, newVec + 1, i - 1));
    if (ret < 0) {
        DECLARE_SIGSET(sigflags);

        ret = -errno;
        if (ret == -ENOTCONN) {
            lock(&sigflags);
            close(logd_fd);
            logd_fd = -1;
            ret = __write_to_log_initialize();
            unlock(&sigflags);

            if (ret < 0) {
                return ret;
            }

            ret = TEMP_FAILURE_RETRY(writev(logd_fd, newVec + 1, i - 1));
            if (ret < 0) {
                ret = -errno;
            }
        }
    }

    if (ret > (ssize_t)sizeof(header)) {
        ret -= sizeof(header);
    } else if (ret == -EAGAIN) {
        atomic_fetch_add_explicit(&dropped, 1, memory_order_relaxed);
    }
#endif

    return ret;
}