Example No. 1
void
test_sub ()
{
  v = res = 20;
  count = 0;

  atomic_fetch_sub (&v, count + 1);
  if (v != --res)
    abort ();

  atomic_fetch_sub_explicit (&v, count + 1, memory_order_consume);
  if (v != --res)
    abort ();

  atomic_fetch_sub (&v, 1);
  if (v != --res)
    abort ();

  atomic_fetch_sub_explicit (&v, 1, memory_order_release);
  if (v != --res)
    abort ();

  atomic_fetch_sub (&v, count + 1);
  if (v != --res)
    abort ();

  atomic_fetch_sub_explicit (&v, count + 1, memory_order_seq_cst);
  if (v != --res)
    abort ();
}
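
The test body above omits its declarations. A minimal harness that makes it build as C11 is sketched below, assuming (as in the GCC atomic test suite this appears to come from) that v is an atomic counter and res and count are plain globals; the declarations belong above the function.

#include <stdatomic.h>
#include <stdlib.h>

/* Assumed globals: with v an atomic_int, the plain reads of v in the
   comparisons above are implicit seq_cst atomic loads in C11. */
static atomic_int v;
static int res, count;

int main(void)
{
    test_sub();
    return 0;
}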
Example No. 2
static inline void write_lock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	while (priorvalue != RW_LOCK_BIAS) {
		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
		do {
			priorvalue = atomic_load_explicit(&rw->lock, memory_order_relaxed);
		} while (priorvalue != RW_LOCK_BIAS);
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	}
}
Example No. 3
static inline void read_lock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	while (priorvalue <= 0) {
		atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
		do {
			priorvalue = atomic_load_explicit(&rw->lock, memory_order_relaxed);
		} while (priorvalue <= 0);
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	}
}
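
These lock paths arrive without their supporting definitions. A minimal sketch of the assumed context follows: the bias value is borrowed from the Linux-style biased rwlock (a writer claims the whole bias, each reader claims 1), and the unlock functions simply give back what the lock path subtracted.

#include <stdatomic.h>

#define RW_LOCK_BIAS 0x00100000  /* assumed value, as in Linux x86 rwlocks */

typedef struct {
	atomic_int lock;  /* equals RW_LOCK_BIAS when the lock is free */
} rwlock_t;

static inline void rwlock_init(rwlock_t *rw)
{
	atomic_init(&rw->lock, RW_LOCK_BIAS);
}

/* Matching unlock operations: add back exactly what the lock path subtracted. */
static inline void write_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
}

static inline void read_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
}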
Example No. 4
/** Initialize DALEC.  MPI must be initialized before this can be called.  It
  * is invalid to make DALEC calls before initialization.  Collective on the world
  * group.
  *
  * @return            Zero on success
  */
int PDALEC_Initialize(MPI_Comm user_comm)
{
    int dalec_alive = atomic_fetch_sub_explicit(&(DALECI_GLOBAL_STATE.alive),
                                                1, memory_order_seq_cst);
    if (dalec_alive == 0) {
        /* Initialize, since this is the first call to this function. */
        int mpi_is_init, mpi_is_fin;
        MPI_Initialized(&mpi_is_init);
        MPI_Finalized(&mpi_is_fin);
        if (!mpi_is_init || mpi_is_fin) {
            DALECI_Warning("MPI must be active when calling DALEC_Initialize");
            return DALEC_ERROR_MPI_USAGE;
        }

        /* Always dupe the user communicator for internal usage. */
        /* Do not abort on MPI failure, let user handle if MPI does not abort. */
        int rc = MPI_Comm_dup(user_comm, &DALECI_GLOBAL_STATE.mpi_comm);

        /* Determine what level of threading MPI supports. */
        int mpi_thread_level;
        MPI_Query_thread(&mpi_thread_level);
        DALECI_GLOBAL_STATE.mpi_thread_level = mpi_thread_level;

        return DALECI_Check_MPI("DALEC_Initialize", "MPI_Comm_dup", rc);

    } else {
        /* Library has already been initialized. */
        return DALEC_SUCCESS;
    }
}
Example No. 5
static void mainloopDataNotifyDecrease(void *p) {
	caerMainloopData mainloopData = p;

	// No special memory order for decrease, because the acquire load to even start running
	// through a mainloop already synchronizes with the release store above.
	atomic_fetch_sub_explicit(&mainloopData->dataAvailable, 1, memory_order_relaxed);
}
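
The release store the comment refers to is not shown. A hypothetical sketch of the producer side, assuming it mirrors this function and uses the same field name:

// Hypothetical counterpart (not from the original source): the release
// increment pairs with the acquire load performed before a mainloop run,
// which is why the decrement above can stay relaxed.
static void mainloopDataNotifyIncrease(void *p) {
	caerMainloopData mainloopData = p;

	atomic_fetch_add_explicit(&mainloopData->dataAvailable, 1, memory_order_release);
}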
Example No. 6
static inline int write_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	if (priorvalue == RW_LOCK_BIAS)
		return 1;

	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
	return 0;
}
Example No. 7
static inline int read_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	if (priorvalue > 0)
		return 1;

	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
	return 0;
}
Example No. 8
void
test_fetch_sub ()
{
  v = res = 20;
  count = 0;

  if (atomic_fetch_sub_explicit (&v, count + 1, memory_order_relaxed) != res--)
    abort ();

  if (atomic_fetch_sub_explicit (&v, 1, memory_order_consume) != res--)
    abort ();

  if (atomic_fetch_sub_explicit (&v, count + 1, memory_order_acquire) != res--)
    abort ();

  if (atomic_fetch_sub_explicit (&v, 1, memory_order_release) != res--)
    abort ();

  if (atomic_fetch_sub_explicit (&v, count + 1, memory_order_acq_rel) != res--)
    abort ();

  if (atomic_fetch_sub_explicit (&v, 1, memory_order_seq_cst) != res--)
    abort ();

  if (atomic_fetch_sub (&v, 1) != res--)
    abort ();
}
Example No. 9
int pthread_mutex_unlock(pthread_mutex_t* mutex_interface) {
#if !defined(__LP64__)
    // Some apps depend on being able to pass NULL as a mutex and get EINVAL
    // back. Don't need to worry about it for LP64 since the ABI is brand new,
    // but keep compatibility for LP32. http://b/19995172.
    if (mutex_interface == NULL) {
        return EINVAL;
    }
#endif

    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        __pthread_normal_mutex_unlock(mutex, shared);
        return 0;
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if ( tid != atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed) ) {
        return EPERM;
    }

    // If the counter is > 0, we can simply decrement it atomically. Other
    // threads can mutate only the lower state bits, and the counter bits sit
    // above them, so with a non-zero counter the subtraction below cannot
    // borrow into bits that are being modified concurrently.
    if (!MUTEX_COUNTER_BITS_IS_ZERO(old_state)) {
        // We still own the mutex, so a release fence is not needed.
        atomic_fetch_sub_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
        return 0;
    }

    // The counter is 0, so we're going to unlock the mutex by resetting its
    // state to unlocked. We need to perform an atomic_exchange in order to read
    // the current state, which will be locked_contended if there may be waiters
    // to wake.
    // A release fence is required to make previous stores visible to next
    // lock owner threads.
    atomic_store_explicit(&mutex->owner_tid, 0, memory_order_relaxed);
    const uint16_t unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    old_state = atomic_exchange_explicit(&mutex->state, unlocked, memory_order_release);
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(old_state)) {
        __futex_wake_ex(&mutex->state, shared, 1);
    }

    return 0;
}
Example No. 10
File: main.cpp Project: CCJY/coliru
void reader(int id) 
{
    for (;;) {
        int idx = atomic_fetch_sub_explicit(&cnt, 1, std::memory_order_relaxed);
        if (idx >= 0) {
            std::cout << "reader " << std::to_string(id) << " processed item "
                      << std::to_string(data[idx]) << '\n';
        } else {
            std::cout << "reader " << std::to_string(id) << " done\n";
            break;
        }
    }
}
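
The reader follows the well-known fetch_sub consumer pattern in which cnt starts at the last valid index of data. A minimal sketch of the assumed setup (sizes and thread count invented for illustration; the globals are referenced by reader() and belong above it in the file):

#include <atomic>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

std::atomic<int> cnt;
std::vector<int> data;

int main()
{
    // Fill the work queue, then point cnt at the last valid index.
    for (int n = 0; n < 10; ++n)
        data.push_back(n);
    cnt = static_cast<int>(data.size()) - 1;

    std::vector<std::thread> pool;
    for (int id = 0; id < 3; ++id)
        pool.emplace_back(reader, id);
    for (auto& t : pool)
        t.join();
}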
Example No. 11
void LSQBaseRelease(LSQBaseTypeRef self)
{
    // fetch_sub returns the value *before* the decrement; capture it here,
    // since re-reading the counter afterwards would race with other threads.
    long prior = atomic_fetch_sub_explicit(&self->data.refcount, 1, memory_order_release);
    // Execute release callback
    if (self->vtable != NULL && self->vtable->release != NULL)
    {
        self->vtable->release(self->data.userdata);
    }
    // Execute dealloc once the last reference has been dropped
    if (prior <= 1)
    {
        atomic_thread_fence(memory_order_acquire);
        LSQBaseDealloc(self);
    }
}
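
The corrected function above follows the standard reference-counting idiom: decrement with release so this thread's prior writes happen-before destruction, then an acquire fence in the thread that dropped the last reference. In generic form (hypothetical names):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
    atomic_long refcount;
    /* ... payload ... */
} object_t;

static void object_release(object_t *obj)
{
    /* fetch_sub returns the pre-decrement value; 1 means this call
       dropped the last reference and nobody else can reach the object. */
    if (atomic_fetch_sub_explicit(&obj->refcount, 1, memory_order_release) == 1) {
        atomic_thread_fence(memory_order_acquire);
        free(obj);
    }
}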
Example No. 12
void tm_thread_do_exit(void)
{
	assert(current_thread->held_locks == 0);
	assert(current_thread->blocklist == 0);

	struct async_call *thread_cleanup_call = async_call_create(&current_thread->cleanup_call, 0, 
							tm_thread_destroy, (unsigned long)current_thread, 0);

	struct ticker *ticker = (void *)atomic_exchange(&current_thread->alarm_ticker, NULL);
	if(ticker) {
		if(ticker_delete(ticker, &current_thread->alarm_timeout) != -ENOENT)
			tm_thread_put(current_thread);
	}

	linkedlist_remove(&current_process->threadlist, &current_thread->pnode);

	tm_thread_remove_kerfs_entries(current_thread);
	atomic_fetch_sub_explicit(&running_threads, 1, memory_order_relaxed);
	if(atomic_fetch_sub(&current_process->thread_count, 1) == 1) {
		atomic_fetch_sub_explicit(&running_processes, 1, memory_order_relaxed);
		tm_process_remove_kerfs_entries(current_process);
		tm_process_exit(current_thread->exit_code);
	}

	cpu_disable_preemption();

	assert(!current_thread->blocklist);
	tqueue_remove(current_thread->cpu->active_queue, &current_thread->activenode);
	atomic_fetch_sub_explicit(&current_thread->cpu->numtasks, 1, memory_order_relaxed);
	tm_thread_raise_flag(current_thread, THREAD_SCHEDULE);
	current_thread->state = THREADSTATE_DEAD;
	
	workqueue_insert(&__current_cpu->work, thread_cleanup_call);
	cpu_interrupt_set(0); /* don't schedule away until we get back
							 to the syscall handler! */
	cpu_enable_preemption();
}
Example No. 13
static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
                                                                  memory_order_relaxed))) {
    return EDEADLK;
  }

  while (true) {
    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__predict_true(old_state == 0)) {
      if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                                                memory_order_acquire, memory_order_relaxed)) {
        // writer_thread_id is protected by rwlock and can only be modified in rwlock write
        // owner thread. Other threads may read it for EDEADLK error checking, atomic operation
        // is safe enough for it.
        atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
        return 0;
      }
    } else {
      timespec ts;
      timespec* rel_timeout = NULL;

      if (abs_timeout_or_null != NULL) {
        rel_timeout = &ts;
        if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
          return ETIMEDOUT;
        }
      }

      // To avoid losing wake-ups, the pending_writers increment should be observed
      // before futex_wait by all threads. A seq_cst fence is used here instead of a
      // seq_cst operation, because only a seq_cst fence can ensure sequential
      // consistency for the non-atomic operations in futex_wait.
      atomic_fetch_add_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      atomic_thread_fence(memory_order_seq_cst);

      int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
                                rel_timeout);

      atomic_fetch_sub_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  }
}
Example No. 14
int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
  if (__state_owned_by_writer(old_state)) {
    if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) != __get_thread()->tid) {
      return EPERM;
    }
    atomic_store_explicit(&rwlock->writer_tid, 0, memory_order_relaxed);
    old_state = atomic_fetch_and_explicit(&rwlock->state, ~STATE_OWNED_BY_WRITER_FLAG,
                                          memory_order_release);
    if (!__state_have_pending_readers_or_writers(old_state)) {
      return 0;
    }

  } else if (__state_owned_by_readers(old_state)) {
    old_state = atomic_fetch_sub_explicit(&rwlock->state, STATE_READER_COUNT_CHANGE_STEP,
                                          memory_order_release);
    if (!__state_is_last_reader(old_state) || !__state_have_pending_readers_or_writers(old_state)) {
      return 0;
    }

  } else {
    return EPERM;
  }

  // Wake up pending readers or writers.
  rwlock->pending_lock.lock();
  if (rwlock->pending_writer_count != 0) {
    rwlock->pending_writer_wakeup_serial++;
    rwlock->pending_lock.unlock();

    __futex_wake_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared, 1);

  } else if (rwlock->pending_reader_count != 0) {
    rwlock->pending_reader_wakeup_serial++;
    rwlock->pending_lock.unlock();

    __futex_wake_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared, INT_MAX);

  } else {
    // It happens when waiters are woken up by timeout.
    rwlock->pending_lock.unlock();
  }
  return 0;
}
Example No. 15
void mrqd_rlock(void * lock) {
    MRQDLock *l = (MRQDLock*)lock;
    bool bRaised = false;
    int readPatience = 0;
 start:
    rgri_arrive(&l->readIndicator);
    if(tatas_is_locked(&l->mutexLock)) {
        rgri_depart(&l->readIndicator);
        while(tatas_is_locked(&l->mutexLock)) {
            thread_yield();
            if((readPatience == MRQD_READ_PATIENCE_LIMIT) && !bRaised) {
                atomic_fetch_add_explicit(&l->writeBarrier.value, 1, memory_order_seq_cst);
                bRaised = true;
            }
            readPatience = readPatience + 1;
        }
        goto start;
    }
    if(bRaised) {
        atomic_fetch_sub_explicit(&l->writeBarrier.value, 1, memory_order_seq_cst);
    }
}
Example No. 16
/**
 * Update speed meter.
 * @param[in] speed Speed meter to update.
 * @param[in] count Amount to add to the current interval's total.
 */
void spdm_update(struct speed_meter *speed, uint64_t count)
{
    uint64_t curr_time = zclock(false);
    uint64_t last_update = atomic_load_explicit(&speed->last_update, memory_order_acquire);

    if (curr_time - last_update >= SEC2USEC(1)) {
        if (atomic_compare_exchange_strong_explicit(&speed->last_update, &last_update, curr_time, memory_order_release,
                                                    memory_order_relaxed)) {
            size_t i = atomic_load_explicit(&speed->i, memory_order_acquire);
            uint64_t speed_aux = atomic_load_explicit(&speed->speed_aux, memory_order_acquire);
            atomic_store_explicit(&speed->backlog[i].speed, speed_aux, memory_order_release);
            atomic_fetch_sub_explicit(&speed->speed_aux, speed_aux, memory_order_release);
            atomic_store_explicit(&speed->backlog[i].timestamp, last_update, memory_order_release);
            i++;
            if (SPEED_METER_BACKLOG == i) {
                i = 0;
            }
            atomic_store_explicit(&speed->i, i, memory_order_release);
        }
    }

    atomic_fetch_add_explicit(&speed->speed_aux, count, memory_order_release);
}
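
Note the drain idiom on speed_aux: the function subtracts the value it just sampled instead of storing zero, so counts added concurrently by other threads between the load and the reset are preserved for the next interval rather than silently discarded.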
Example No. 17
/** Finalize DALEC.  Must be called before MPI is finalized.  DALEC calls are
  * not valid after finalization.  Collective on world group.
  *
  * @return            Zero on success
  */
int PDALEC_Finalize(void)
{
    int dalec_alive = atomic_fetch_sub_explicit(&(DALECI_GLOBAL_STATE.alive),
                                                1, memory_order_seq_cst);

    if (dalec_alive == 1) {
        /* Check for MPI initialization */
        int mpi_is_init, mpi_is_fin;
        MPI_Initialized(&mpi_is_init);
        MPI_Finalized(&mpi_is_fin);

        /* Free communicator if possible and return */
        if (!mpi_is_init || mpi_is_fin) {
            DALECI_Warning("MPI must be active when calling DALEC_Finalize");
            return DALEC_ERROR_MPI_USAGE;
        } else {
            int rc = MPI_Comm_free(&DALECI_GLOBAL_STATE.mpi_comm);
            return DALECI_Check_MPI("DALEC_Finalize", "MPI_Comm_free", rc);
        }
    } else {
        /* Library is still active. */
        return DALEC_SUCCESS;
    }
}
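
Finalize pairs with Initialize above: a single alive counter brackets the library's lifetime so that only the first and last calls do real work. A minimal sketch of the usual shape of that pattern follows; the conventional form increments on init, so treat this as an assumption about intent rather than DALEC's actual code.

#include <stdatomic.h>

static atomic_int alive;  /* 0 = library not initialized */

int lib_init(void)
{
    /* Only the first caller (previous value 0) performs setup. */
    if (atomic_fetch_add_explicit(&alive, 1, memory_order_seq_cst) == 0) {
        /* ... one-time setup ... */
    }
    return 0;
}

int lib_finalize(void)
{
    /* Only the last caller (previous value 1) tears down. */
    if (atomic_fetch_sub_explicit(&alive, 1, memory_order_seq_cst) == 1) {
        /* ... one-time teardown ... */
    }
    return 0;
}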
Example No. 18
/**
 * Authenticate and set client info.
 * @param[in] sess Client session.
 * @return Zero on success (or one of *_RC).
 */
static int session_authenticate(struct zsession *sess)
{
    int ret = OTHER_RC;
    VALUE_PAIR *request_attrs = NULL, *response_attrs = NULL, *attrs = NULL;
    char msg[8192]; // WARNING: libfreeradius-client handles this buffer unsafely.
    rc_handle *rh = zinst()->radh;
    struct in_addr ip_addr;
    char ip_str[INET_ADDRSTRLEN];
    struct zcrules rules;

    crules_init(&rules);

    ip_addr.s_addr = htonl(sess->ip);
    if (unlikely(NULL == inet_ntop(AF_INET, &ip_addr, ip_str, sizeof(ip_str)))) {
        goto end;
    }

    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_USER_NAME, ip_str, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_USER_PASSWORD, "", -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_NAS_IDENTIFIER, zcfg()->radius_nas_identifier, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_CALLING_STATION_ID, ip_str, -1, 0))) {
        goto end;
    }

    ret = rc_auth(rh, 0, request_attrs, &response_attrs, msg);
    if (OK_RC != ret) {
        ZERO_LOG(LOG_ERR, "Session authentication failed for %s (code:%d)", ip_str, ret);
        goto end;
    }

    attrs = response_attrs;
    while (likely(NULL != attrs)) {
        switch (attrs->attribute) {
            case PW_FILTER_ID:
                crules_parse(&rules, attrs->strvalue);
                break;
            case PW_SESSION_TIMEOUT:
                atomic_store_explicit(&sess->max_duration, SEC2USEC(attrs->lvalue), memory_order_release);
                break;
            case PW_ACCT_INTERIM_INTERVAL:
                atomic_store_explicit(&sess->acct_interval, SEC2USEC(attrs->lvalue), memory_order_release);
                break;
        }
        attrs = attrs->next;
    }

    if (likely(rules.have.user_id && rules.have.login)) {
        struct zclient *client = sess->client;

        client_db_find_or_set_id(zinst()->client_db, rules.user_id, &client);
        if (client != sess->client) {
            // found
            pthread_rwlock_wrlock(&sess->lock_client);
            atomic_fetch_add_explicit(&client->refcnt, 1, memory_order_relaxed);
            client_release(sess->client);
            sess->client = client;
            client_session_add(sess->client, sess);
            pthread_rwlock_unlock(&sess->lock_client);
        } else {
            client_apply_rules(sess->client, &rules);
        }

        atomic_fetch_sub_explicit(&zinst()->unauth_sessions_cnt, 1, memory_order_release);

        // log successful authentication
        {
            UT_string rules_str;
            utstring_init(&rules_str);
            utstring_reserve(&rules_str, 1024);

            attrs = response_attrs;
            while (likely(NULL != attrs)) {
                switch (attrs->attribute) {
                    case PW_FILTER_ID:
                        utstring_printf(&rules_str, " %s", attrs->strvalue);
                        break;
                    default:
                        break;
                }
                attrs = attrs->next;
            }

            zero_syslog(LOG_INFO, "Authenticated session %s (rules:%s)", ip_str, utstring_body(&rules_str));
            utstring_done(&rules_str);
        }
    } else {
        ret = OTHER_RC;
        ZERO_LOG(LOG_ERR, "Session authentication failed for %s (code:%d)", ip_str, ret);
    }

    end:
    crules_free(&rules);
    if (request_attrs) rc_avpair_free(request_attrs);
    if (response_attrs) rc_avpair_free(response_attrs);

    return ret;
}
Example No. 19
/**
 * Send radius accounting packet.
 * @param[in] sess Session
 * @param[in] status Accounting status (PW_STATUS_START, PW_STATUS_STOP, PW_STATUS_ALIVE)
 * @param[in] term_cause Accounting termination cause (used only in case of PW_STATUS_STOP)
 * @return Zero on success or one of the *_RC codes.
 */
static int session_accounting(struct zsession *sess, uint32_t status, uint32_t term_cause)
{
    int ret = OTHER_RC;
    VALUE_PAIR *request_attrs = NULL;
    rc_handle *rh = zinst()->radh;
    struct in_addr ip_addr;
    char ip_str[INET_ADDRSTRLEN];

    uint64_t traff_down = atomic_load_explicit(&sess->traff_down, memory_order_acquire);
    uint64_t traff_up = atomic_load_explicit(&sess->traff_up, memory_order_acquire);
    uint32_t octets_down = (uint32_t) traff_down;   /* low 32 bits; full wraps go to gigawords */
    uint32_t octets_up = (uint32_t) traff_up;
    uint32_t packets_down = (uint32_t) atomic_load_explicit(&sess->packets_down, memory_order_acquire);
    uint32_t packets_up = (uint32_t) atomic_load_explicit(&sess->packets_up, memory_order_acquire);
    uint32_t gigawords_down = 0;
    uint32_t gigawords_up = 0;

    char session_id[255];
    snprintf(session_id, sizeof(session_id), "%s-%" PRIu32, sess->client->login, sess->ip);

    ip_addr.s_addr = htonl(sess->ip);
    if (unlikely(NULL == inet_ntop(AF_INET, &ip_addr, ip_str, sizeof(ip_str)))) {
        goto end;
    }

    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_CALLING_STATION_ID, ip_str, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_FRAMED_IP_ADDRESS, &sess->ip, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_USER_NAME, sess->client->login, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_ACCT_SESSION_ID, session_id, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_NAS_IDENTIFIER, zcfg()->radius_nas_identifier, -1, 0))) {
        goto end;
    }
    if (PW_STATUS_STOP == status) {
        if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_ACCT_TERMINATE_CAUSE, &term_cause, -1, 0))) {
            goto end;
        }
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_ACCT_STATUS_TYPE, &status, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_ACCT_INPUT_OCTETS, &octets_down, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_ACCT_INPUT_PACKETS, &packets_down, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_ACCT_OUTPUT_OCTETS, &octets_up, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_ACCT_OUTPUT_PACKETS, &packets_up, -1, 0))) {
        goto end;
    }
    if (unlikely(traff_down > UINT32_MAX)) {
        gigawords_down = (uint32_t) (traff_down >> 32);
        if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_ACCT_INPUT_GIGAWORDS, &gigawords_down, -1, 0))) {
            goto end;
        }
    }
    if (unlikely(traff_up > UINT32_MAX)) {
        gigawords_up = (uint32_t) (traff_up >> 32);
        if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_ACCT_OUTPUT_GIGAWORDS, &gigawords_up, -1, 0))) {
            goto end;
        }
    }

    ret = rc_acct(rh, 0, request_attrs);
    if (unlikely(OK_RC != ret)) {
        ZERO_LOG(LOG_ERR, "radius accounting failed %s (code:%d)", ip_str, ret);
        goto end;
    }

    atomic_fetch_sub_explicit(&sess->traff_down, traff_down, memory_order_release);
    atomic_fetch_sub_explicit(&sess->traff_up, traff_up, memory_order_release);
    atomic_fetch_sub_explicit(&sess->packets_down, packets_down, memory_order_release);
    atomic_fetch_sub_explicit(&sess->packets_up, packets_up, memory_order_release);

    end:
    if (request_attrs) {
        rc_avpair_free(request_attrs);
    }

    return ret;
}
Example No. 20
void ponyint_asio_noisy_remove()
{
  atomic_fetch_sub_explicit(&running_base.noisy_count, 1, memory_order_relaxed);
}
Example No. 21
TEST(stdatomic, atomic_fetch_sub) {
  atomic_int i = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_fetch_sub(&i, 1));
  ASSERT_EQ(122, atomic_fetch_sub_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(121, atomic_load(&i));
}