Example #1
void
test_add ()
{
  v = 0;
  count = 1;

  atomic_fetch_add (&v, count);
  if (v != 1)
    abort ();

  atomic_fetch_add_explicit (&v, count, memory_order_consume);
  if (v != 2)
    abort ();

  atomic_fetch_add (&v, 1);
  if (v != 3)
    abort ();

  atomic_fetch_add_explicit (&v, 1, memory_order_release);
  if (v != 4)
    abort ();

  atomic_fetch_add (&v, 1);
  if (v != 5)
    abort ();

  atomic_fetch_add_explicit (&v, count, memory_order_seq_cst);
  if (v != 6)
    abort ();
}
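These checks come from a compiler-style test in which `v` and `count` are file-scope variables declared elsewhere in the same file. A minimal, self-contained way to build and run the function above might look like the sketch below; the declarations and the `main` driver are assumptions for illustration, not part of the original test.

/* Assumed declarations and driver for test_add above (place before the function). */
#include <stdatomic.h>
#include <stdlib.h>

static atomic_int v;   /* assumed: shared atomic counter checked by the test */
static int count;      /* assumed: plain increment operand */

void test_add (void);  /* the function listed above */

int
main (void)
{
  test_add ();
  return 0;
}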
Example #2
// Increase the counts of all requested pages by 1.
void fs_inode_map_region(struct inode *node, size_t offset, size_t length)
{
	mutex_acquire(&node->mappings_lock);
	__init_physicals(node);
	ASSERT(!(offset & ~PAGE_MASK));
	int page_number = offset / PAGE_SIZE;
	int npages = ((length-1) / PAGE_SIZE) + 1;
	for(int i = page_number; i < (page_number + npages); i++)
	{
		struct physical_page *entry;
		if((entry = hash_lookup(&node->physicals, &i, sizeof(i))) == NULL) {
			// Create the entry, and add it.
			entry = __create_entry();
			entry->pn = i;
			hash_insert(&node->physicals, &entry->pn, sizeof(entry->pn), &entry->hash_elem, entry);
			atomic_fetch_add_explicit(&node->mapped_entries_count, 1, memory_order_relaxed);
		}
		mutex_acquire(&entry->lock);

		// Bump the count...
		atomic_fetch_add_explicit(&entry->count, 1, memory_order_relaxed);
		mutex_release(&entry->lock);
		/* NOTE: We're not actually allocating or mapping anything here, really. All we're doing
		 * is indicating our intent to map a certain section, so we don't free pages. */
	}
	atomic_thread_fence(memory_order_acq_rel);
	mutex_release(&node->mappings_lock);
}
Example #3
/* Account for the received packets and bytes with relaxed atomic counters,
 * then pass each packet on via net_data_receive(). */
void net_receive_packet(struct net_dev *nd, struct net_packet *packets, int count)
{
	TRACE_MSG("net.packet", "receive %d packets\n", count);
	atomic_fetch_add_explicit(&nd->rx_count, count, memory_order_relaxed);
	for(int i=0;i<count;i++) {
		atomic_fetch_add_explicit(&nd->rx_bytes, packets[i].length, memory_order_relaxed);
		net_data_receive(nd, &packets[i]);
	}
}
Example #4
/* Account for the transmitted packets and bytes, then hand the batch to the
 * device's send callback. */
int net_transmit_packet(struct net_dev *nd, struct net_packet *packets, int count)
{
	atomic_fetch_add_explicit(&nd->tx_count, count, memory_order_relaxed);
	for(int i=0;i<count;i++)
		atomic_fetch_add_explicit(&nd->tx_bytes, packets[i].length, memory_order_relaxed);
	TRACE_MSG("net.packet", "send #%d\n", nd->tx_count);
	int ret = net_callback_send(nd, packets, count);
	TRACE_MSG("net.packet", "send returned %d\n", ret);
	return ret;
}
Example #5
__attribute__((noinline)) static void tm_process_exit(int code)
{
	spinlock_acquire(&current_thread->status_lock);
	if(code != -9) 
		current_process->exit_reason.cause = __EXIT;
	current_process->exit_reason.ret = code;
	current_process->exit_reason.pid = current_process->pid;
	spinlock_release(&current_thread->status_lock);

	/* update times */
	if(current_process->parent) {
		time_t total_utime = current_process->utime + current_process->cutime;
		time_t total_stime = current_process->stime + current_process->cstime;
		atomic_fetch_add_explicit(&current_process->parent->cutime,
				total_utime, memory_order_relaxed);
		atomic_fetch_add_explicit(&current_process->parent->cstime,
				total_stime, memory_order_relaxed);
	}
	file_close_all();
	if(current_process->root)
		vfs_icache_put(current_process->root);
	if(current_process->cwd)
		vfs_icache_put(current_process->cwd);
	mutex_destroy(&current_process->fdlock);
	mm_destroy_all_mappings(current_process);
	linkedlist_destroy(&(current_process->mappings));
	valloc_destroy(&current_process->mmf_valloc);

	/* this is done before SIGCHILD is sent out */
	atomic_fetch_or(&current_process->flags, PROCESS_EXITED);
	if(current_process->parent) {
		struct process *init = tm_process_get(0);
		assert(init);
		
		__linkedlist_lock(process_list);
		struct process *child;
		struct linkedentry *node;
		for(node = linkedlist_iter_start(process_list);
				node != linkedlist_iter_end(process_list);
				node = linkedlist_iter_next(node)) {
			child = linkedentry_obj(node);
			if(child->parent == current_process) {
				tm_process_inc_reference(init);
				child->parent = init;
				tm_process_put(current_process);
			}
		}
		__linkedlist_unlock(process_list);
		tm_signal_send_process(current_process->parent, SIGCHILD);
		tm_blocklist_wakeall(&current_process->waitlist);
		tm_process_put(init);
	}
	tm_process_put(current_process); /* fork starts us out at refs = 1 */
}
Example #6
void tm_timer_handler(struct registers *r, int int_no, int flags)
{
	if(current_thread) {
		ticker_tick(&current_thread->cpu->ticker, ONE_SECOND / current_hz);
		if(current_thread->system)
			atomic_fetch_add_explicit(&current_process->stime, ONE_SECOND / current_hz, memory_order_relaxed);
		else
			atomic_fetch_add_explicit(&current_process->utime, ONE_SECOND / current_hz, memory_order_relaxed);
		current_thread->timeslice /= 2;
		if(!current_thread->timeslice) {
			current_thread->flags |= THREAD_SCHEDULE;
			current_thread->timeslice = current_thread->priority;
		}
	}
}
Example #7
void cpu_interrupt_irq_entry(struct registers *regs, int int_no)
{
	cpu_interrupt_set(0);
	atomic_fetch_add_explicit(&interrupt_counts[int_no], 1, memory_order_relaxed);
	int already_in_kernel = 0;
	if(!current_thread->regs)
		current_thread->regs = regs;
	else
		already_in_kernel = 1;
	atomic_fetch_add(&current_thread->interrupt_level, 1);
	/* Now, run through the stage1 handlers, and see if we need any
	 * stage2 handlers to run later. */
	int s1started = timer_start(&interrupt_timers[int_no]);
	for(int i = 0; i < MAX_HANDLERS; i++)
	{
		if(interrupt_handlers[int_no][i].fn)
			(interrupt_handlers[int_no][i].fn)(regs, int_no, 0);
	}
	if(s1started)
		timer_stop(&interrupt_timers[int_no]);
	cpu_interrupt_set(0);
	atomic_fetch_sub(&current_thread->interrupt_level, 1);
	if(!already_in_kernel) {
		__setup_signal_handler(regs);
		current_thread->regs = 0;
		ASSERT(!current_thread || !current_process || (current_process == kernel_process) || current_thread->held_locks == 0 || (current_thread->flags & THREAD_KERNEL));
	}
}
Example #8
// Set up the frame with a new reference to the buffer. The buffer must have been
// allocated from the given pool.
static int ffmmal_set_ref(AVFrame *frame, FFPoolRef *pool,
                          MMAL_BUFFER_HEADER_T *buffer)
{
    FFBufferRef *ref = av_mallocz(sizeof(*ref));
    if (!ref)
        return AVERROR(ENOMEM);

    ref->pool = pool;
    ref->buffer = buffer;

    frame->buf[0] = av_buffer_create((void *)ref, sizeof(*ref),
                                     ffmmal_release_frame, NULL,
                                     AV_BUFFER_FLAG_READONLY);
    if (!frame->buf[0]) {
        av_free(ref);
        return AVERROR(ENOMEM);
    }

    atomic_fetch_add_explicit(&ref->pool->refcount, 1, memory_order_relaxed);
    mmal_buffer_header_acquire(buffer);

    frame->format = AV_PIX_FMT_MMAL;
    frame->data[3] = (uint8_t *)ref->buffer;
    return 0;
}
Example #9
static void ffmmal_poolref_unref(FFPoolRef *ref)
{
    if (ref &&
        atomic_fetch_add_explicit(&ref->refcount, -1, memory_order_acq_rel) == 1) {
        mmal_pool_destroy(ref->pool);
        av_free(ref);
    }
}
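The decrement above is the usual last-owner-frees reference count: `fetch_add` with -1 and `memory_order_acq_rel` returns the prior value, and whoever sees 1 knows the count just hit zero. A stripped-down sketch of the same pattern with a hypothetical object type (not FFmpeg's `FFPoolRef`):

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical refcounted object, for illustration only. */
typedef struct {
    atomic_int refcount;   /* starts at 1 for the creator */
    /* ... payload ... */
} object_t;

static void object_unref(object_t *obj)
{
    /* Release half: publish our writes to the object before the decrement.
     * Acquire half: see every earlier owner's writes before freeing. */
    if (obj &&
        atomic_fetch_add_explicit(&obj->refcount, -1, memory_order_acq_rel) == 1)
        free(obj);
}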
Example #10
static inline int write_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	if (priorvalue == RW_LOCK_BIAS)
		return 1;

	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
	return 0;
}
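On failure, `write_trylock` has already added `RW_LOCK_BIAS` back, so the caller simply moves on; on success it owns the lock until it calls the `write_unlock` shown in Example #27. A hypothetical caller (the helper name is an assumption for illustration):

static void do_exclusive_work(void) { /* assumed critical-section work */ }

static void try_exclusive(rwlock_t *rw)
{
	if (write_trylock(rw)) {
		/* Prior value was RW_LOCK_BIAS: no readers and no writer held the lock. */
		do_exclusive_work();
		write_unlock(rw); /* Example #27: add RW_LOCK_BIAS back with release ordering */
	}
	/* Otherwise write_trylock already restored the bias; nothing to undo here. */
}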
Example #11
static inline int read_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	if (priorvalue > 0)
		return 1;

	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
	return 0;
}
Example #12
void* LSQBaseRetain(LSQBaseTypeRef self)
{
    atomic_fetch_add_explicit(&self->data.refcount, 1, memory_order_release);
    // Execute callback
    if (self->vtable != NULL && self->vtable->retain != NULL)
    {
        self = self->vtable->retain(self->data.userdata);
    }
    // Return object
    return self;
}
Example #13
static inline void write_lock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	while (priorvalue != RW_LOCK_BIAS) {
		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
		do {
			priorvalue = atomic_load_explicit(&rw->lock, memory_order_relaxed);
		} while (priorvalue != RW_LOCK_BIAS);
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	}
}
Example #14
static inline void read_lock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	while (priorvalue <= 0) {
		atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
		do {
			priorvalue = atomic_load_explicit(&rw->lock, memory_order_relaxed);
		} while (priorvalue <= 0);
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	}
}
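The listing shows `write_unlock` (Example #27) but not the read-side release. Under the same `RW_LOCK_BIAS` scheme it would presumably just give the reader's unit back with release ordering; the sketch below is an assumption, not part of the original source:

static inline void read_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
}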
Example #15
void
test_fetch_add ()
{
  v = 0;
  count = 1;

  if (atomic_fetch_add_explicit (&v, count, memory_order_relaxed) != 0)
    abort ();

  if (atomic_fetch_add_explicit (&v, 1, memory_order_consume) != 1)
    abort ();

  if (atomic_fetch_add_explicit (&v, count, memory_order_acquire) != 2)
    abort ();

  if (atomic_fetch_add_explicit (&v, 1, memory_order_release) != 3)
    abort ();

  if (atomic_fetch_add_explicit (&v, count, memory_order_acq_rel) != 4)
    abort ();

  if (atomic_fetch_add_explicit (&v, 1, memory_order_seq_cst) != 5)
    abort ();

  if (atomic_fetch_add (&v, 1) != 6)
    abort ();
}
Example #16
static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
                                                                  memory_order_relaxed))) {
    return EDEADLK;
  }

  while (true) {
    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__predict_true(old_state == 0)) {
      if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                                                memory_order_acquire, memory_order_relaxed)) {
        // writer_thread_id is protected by the rwlock and can only be modified by the
        // thread that owns it for writing. Other threads only read it for EDEADLK error
        // checking, so a relaxed atomic access is sufficient.
        atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
        return 0;
      }
    } else {
      timespec ts;
      timespec* rel_timeout = NULL;

      if (abs_timeout_or_null != NULL) {
        rel_timeout = &ts;
        if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
          return ETIMEDOUT;
        }
      }

      // To avoid losing wakeups, the pending_writers increment must be observed by all
      // threads before the futex_wait. A seq_cst fence is used here instead of a seq_cst
      // operation, because only a seq_cst fence can guarantee sequential consistency for
      // the non-atomic operations inside futex_wait.
      atomic_fetch_add_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      atomic_thread_fence(memory_order_seq_cst);

      int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
                                rel_timeout);

      atomic_fetch_sub_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  }
}
Example #17
/* This common inlined function is used to increment the counter of a recursive mutex.
 *
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0.
 *
 */
static inline __always_inline int __recursive_increment(pthread_mutex_internal_t* mutex,
                                                        uint16_t old_state) {
    // Detect recursive lock overflow and return EAGAIN.
    // This is safe because only the owner thread can modify the
    // counter bits in the mutex value.
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(old_state)) {
        return EAGAIN;
    }

    // Other threads may change the lower bits (e.g. promoting the mutex to "contended"),
    // but the counter itself cannot overflow here, so an atomic_fetch_add is sufficient.
    // The mutex is still locked by the current thread, so no release fence is needed.
    atomic_fetch_add_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
    return 0;
}
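To see why a relaxed `fetch_add` is safe here: the recursion counter occupies its own bit field of the state word, above the ownership/contention bits that other threads may flip, and only the owning thread ever changes the counter bits. The sketch below uses made-up constants and names; bionic's real layout and MUTEX_* macros differ.

#include <stdatomic.h>
#include <stdint.h>
#include <errno.h>

/* Hypothetical layout: bits 0-1 hold lock/contention state, bits 2-15 the counter. */
#define COUNTER_SHIFT 2
#define COUNTER_ONE   ((uint16_t)(1u << COUNTER_SHIFT))
#define COUNTER_MASK  ((uint16_t)(0x3fffu << COUNTER_SHIFT))

static int recursive_increment_sketch(_Atomic uint16_t* state, uint16_t old_state) {
    // Counter field already saturated: one more increment would wrap.
    if ((old_state & COUNTER_MASK) == COUNTER_MASK) {
        return EAGAIN;
    }
    // Only the owner touches the counter bits; the low bits may change concurrently,
    // but adding COUNTER_ONE never carries into them, so the RMW leaves them intact.
    atomic_fetch_add_explicit(state, COUNTER_ONE, memory_order_relaxed);
    return 0;
}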
Example #18
File: timer.c Project: 0xheart0/vlc
VLC_NORETURN
static void *vlc_timer_thread (void *data)
{
    struct vlc_timer *timer = data;

    vlc_mutex_lock (&timer->lock);
    mutex_cleanup_push (&timer->lock);

    for (;;)
    {
        while (timer->value == 0)
            vlc_cond_wait (&timer->reschedule, &timer->lock);

        if (vlc_cond_timedwait (&timer->reschedule, &timer->lock,
                                timer->value) == 0)
            continue;
        if (timer->interval == 0)
            timer->value = 0; /* disarm */
        vlc_mutex_unlock (&timer->lock);

        int canc = vlc_savecancel ();
        timer->func (timer->data);
        vlc_restorecancel (canc);

        mtime_t now = mdate ();
        unsigned misses;

        vlc_mutex_lock (&timer->lock);
        if (timer->interval == 0)
            continue;

        misses = (now - timer->value) / timer->interval;
        timer->value += timer->interval;
        /* Try to compensate for one miss (mwait() will return immediately)
         * but no more. Otherwise, we might busy loop, after extended periods
         * without scheduling (suspend, SIGSTOP, RT preemption, ...). */
        if (misses > 1)
        {
            misses--;
            timer->value += misses * timer->interval;
            atomic_fetch_add_explicit (&timer->overruns, misses,
                                       memory_order_relaxed);
        }
    }

    vlc_cleanup_pop ();
    vlc_assert_unreachable ();
}
Example #19
// This function is used by pthread_cond_broadcast and
// pthread_cond_signal to atomically decrement the counter
// then wake up thread_count threads.
static int __pthread_cond_pulse(pthread_cond_internal_t* cond, int thread_count) {
  // We don't use a release/seq_cst fence here, because the pthread_cond_wait/signal pair
  // can't be used as a memory-synchronization mechanism by itself; it must always be used
  // together with a pthread mutex. Note that spurious wakeups from pthread_cond_wait/timedwait
  // may occur, so when using condition variables there is always a boolean predicate over
  // shared variables associated with each condition wait that is true if the thread should
  // proceed. If the predicate is already true before the wait, pthread_cond_wait/timedwait
  // is not called at all. That is why the wait/signal pair cannot serve as a memory
  // synchronization method, and why adding a fence here would not help.

  // The counter increment must leave the flag bits alone, even if the counter overflows.
  atomic_fetch_add_explicit(&cond->state, COND_COUNTER_STEP, memory_order_relaxed);

  __futex_wake_ex(&cond->state, cond->process_shared(), thread_count);
  return 0;
}
Example #20
void* ponyint_mpmcq_pop(mpmcq_t* q)
{
  size_t my_ticket = atomic_fetch_add_explicit(&q->ticket, 1,
    memory_order_relaxed);

  while(my_ticket != atomic_load_explicit(&q->waiting_for,
    memory_order_relaxed))
    ponyint_cpu_relax();

  atomic_thread_fence(memory_order_acquire);

  mpmcq_node_t* tail = atomic_load_explicit(&q->tail, memory_order_relaxed);
  // Get the next node rather than the tail. The tail is either a stub or has
  // already been consumed.
  mpmcq_node_t* next = atomic_load_explicit(&tail->next, memory_order_relaxed);
  
  // Bailout if we have no next node.
  if(next == NULL)
  {
    atomic_store_explicit(&q->waiting_for, my_ticket + 1, memory_order_relaxed);
    return NULL;
  }

  atomic_store_explicit(&q->tail, next, memory_order_relaxed);
  atomic_store_explicit(&q->waiting_for, my_ticket + 1, memory_order_release);

  // Synchronise-with the push.
  atomic_thread_fence(memory_order_acquire);
  
  // We'll return the data pointer from the next node.
  void* data = atomic_load_explicit(&next->data, memory_order_relaxed);

  // Since we will be freeing the old tail, we need to be sure no other
  // consumer is still reading the old tail. To do this, we set the data
  // pointer of our new tail to NULL, and we wait until the data pointer of
  // the old tail is NULL.
  atomic_store_explicit(&next->data, NULL, memory_order_release);

  while(atomic_load_explicit(&tail->data, memory_order_relaxed) != NULL)
    ponyint_cpu_relax();

  atomic_thread_fence(memory_order_acquire);

  // Free the old tail. The new tail is the next node.
  POOL_FREE(mpmcq_node_t, tail);
  return data;
}
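The `ticket`/`waiting_for` pair above is a ticket discipline: each consumer takes a number with a relaxed `fetch_add` and spins until it is being served, which serialises consumers without a mutex. The same idea in isolation, as a minimal ticket lock sketch (names and types are assumptions, not from the Pony runtime):

#include <stdatomic.h>
#include <stddef.h>

typedef struct
{
  atomic_size_t next_ticket;
  atomic_size_t now_serving;
} ticket_lock_t;

static void ticket_lock(ticket_lock_t* l)
{
  size_t t = atomic_fetch_add_explicit(&l->next_ticket, 1, memory_order_relaxed);

  // Spin until our number comes up; acquire pairs with the release in unlock.
  while(atomic_load_explicit(&l->now_serving, memory_order_acquire) != t)
    ;
}

static void ticket_unlock(ticket_lock_t* l)
{
  // Only the holder increments now_serving, so a relaxed load is fine here.
  size_t t = atomic_load_explicit(&l->now_serving, memory_order_relaxed);
  atomic_store_explicit(&l->now_serving, t + 1, memory_order_release);
}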
Example #21
void cpu_interrupt_syscall_entry(struct registers *regs, int syscall_num)
{
	cpu_interrupt_set(0);
	ASSERT(!current_thread->regs);
	current_thread->regs = regs;
	atomic_fetch_add_explicit(&interrupt_counts[0x80], 1, memory_order_relaxed);
	if(syscall_num == 128) {
		arch_tm_userspace_signal_cleanup(regs);
	} else {
		current_thread->regs = regs;
		syscall_handler(regs);
	}
	cpu_interrupt_set(0);
	__setup_signal_handler(regs);
	current_thread->regs = 0;
	ASSERT((current_process == kernel_process) || current_thread->held_locks == 0);
}
Example #22
void mrqd_rlock(void * lock) {
    MRQDLock *l = (MRQDLock*)lock;
    bool bRaised = false;
    int readPatience = 0;
 start:
    rgri_arrive(&l->readIndicator);
    if(tatas_is_locked(&l->mutexLock)) {
        rgri_depart(&l->readIndicator);
        while(tatas_is_locked(&l->mutexLock)) {
            thread_yield();
            if((readPatience == MRQD_READ_PATIENCE_LIMIT) && !bRaised) {
                atomic_fetch_add_explicit(&l->writeBarrier.value, 1, memory_order_seq_cst);
                bRaised = true;
            }
            readPatience = readPatience + 1;
        }
        goto start;
    }
    if(bRaised) {
        atomic_fetch_sub_explicit(&l->writeBarrier.value, 1, memory_order_seq_cst);
    }
}
Example #23
/**
 * Update speed meter.
 * @param[in] speed Speed meter to update.
 * @param[in] count Amount to add to the current accumulation window.
 */
void spdm_update(struct speed_meter *speed, uint64_t count)
{
    uint64_t curr_time = zclock(false);
    uint64_t last_update = atomic_load_explicit(&speed->last_update, memory_order_acquire);

    if (curr_time - last_update >= SEC2USEC(1)) {
        if (atomic_compare_exchange_strong_explicit(&speed->last_update, &last_update, curr_time, memory_order_release,
                                                    memory_order_relaxed)) {
            size_t i = atomic_load_explicit(&speed->i, memory_order_acquire);
            uint64_t speed_aux = atomic_load_explicit(&speed->speed_aux, memory_order_acquire);
            atomic_store_explicit(&speed->backlog[i].speed, speed_aux, memory_order_release);
            atomic_fetch_sub_explicit(&speed->speed_aux, speed_aux, memory_order_release);
            atomic_store_explicit(&speed->backlog[i].timestamp, last_update, memory_order_release);
            i++;
            if (SPEED_METER_BACKLOG == i) {
                i = 0;
            }
            atomic_store_explicit(&speed->i, i, memory_order_release);
        }
    }

    atomic_fetch_add_explicit(&speed->speed_aux, count, memory_order_release);
}
Example #24
void cpu_interrupt_isr_entry(struct registers *regs, int int_no, addr_t return_address)
{
	int already_in_kernel = 0;
	cpu_interrupt_set(0);
	atomic_fetch_add_explicit(&interrupt_counts[int_no], 1, memory_order_relaxed);
	if(current_thread && !current_thread->regs) {
		current_thread->regs = regs;
		current_thread->system = 255;
	}
	else
		already_in_kernel = 1;
	int started = timer_start(&interrupt_timers[int_no]);
	char called = 0;
	for(int i = 0; i < MAX_HANDLERS; i++)
	{
		if(interrupt_handlers[int_no][i].fn)
		{
			interrupt_handlers[int_no][i].fn(regs, int_no, 0);
			called = 1;
		}
	}
	if(started)
		timer_stop(&interrupt_timers[int_no]);
	cpu_interrupt_set(0);
	// If it went unhandled, kill the process or panic.
	if(!already_in_kernel) {
		current_thread->system = 0;
	}
	if(!called)
		faulted(int_no, !already_in_kernel, return_address, regs->err_code, regs);
	if(!already_in_kernel) {
		__setup_signal_handler(regs);
		current_thread->regs = 0;
		ASSERT(!current_thread || (current_process == kernel_process) || current_thread->held_locks == 0);
	}
}
Example #25
static void a(void *obj)
{
	int i;
	for (i = 0; i < N; i++)
		atomic_fetch_add_explicit(&x, 1, memory_order_relaxed);
}
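`x` and `N` are defined by the surrounding test harness. A self-contained version with assumed declarations and a pthread driver shows the point of the test: even with `memory_order_relaxed`, each increment is one atomic read-modify-write, so two threads never lose an update.

#include <stdatomic.h>
#include <pthread.h>
#include <assert.h>

#define N 100000            /* assumed iteration count */
static atomic_int x;        /* assumed shared counter */

static void a(void *obj)
{
	(void)obj;
	for (int i = 0; i < N; i++)
		atomic_fetch_add_explicit(&x, 1, memory_order_relaxed);
}

static void *runner(void *arg)
{
	a(arg);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	pthread_create(&t1, NULL, runner, NULL);
	pthread_create(&t2, NULL, runner, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	assert(atomic_load(&x) == 2 * N);   /* no lost updates despite relaxed ordering */
	return 0;
}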
Example #26
/**
 * Authenticate and set client info.
 * @param[in] sess Client session.
 * @return Zero on success (or one of *_RC).
 */
static int session_authenticate(struct zsession *sess)
{
    int ret = OTHER_RC;
    VALUE_PAIR *request_attrs = NULL, *response_attrs = NULL, *attrs = NULL;
    char msg[8192]; // WARNING: libfreeradius-client handles this buffer unsafely.
    rc_handle *rh = zinst()->radh;
    struct in_addr ip_addr;
    char ip_str[INET_ADDRSTRLEN];
    struct zcrules rules;

    crules_init(&rules);

    ip_addr.s_addr = htonl(sess->ip);
    if (unlikely(NULL == inet_ntop(AF_INET, &ip_addr, ip_str, sizeof(ip_str)))) {
        goto end;
    }

    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_USER_NAME, ip_str, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_USER_PASSWORD, "", -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_NAS_IDENTIFIER, zcfg()->radius_nas_identifier, -1, 0))) {
        goto end;
    }
    if (unlikely(NULL == rc_avpair_add(rh, &request_attrs, PW_CALLING_STATION_ID, ip_str, -1, 0))) {
        goto end;
    }

    ret = rc_auth(rh, 0, request_attrs, &response_attrs, msg);
    if (OK_RC != ret) {
        ZERO_LOG(LOG_ERR, "Session authentication failed for %s (code:%d)", ip_str, ret);
        goto end;
    }

    attrs = response_attrs;
    while (likely(NULL != attrs)) {
        switch (attrs->attribute) {
            case PW_FILTER_ID:
                crules_parse(&rules, attrs->strvalue);
                break;
            case PW_SESSION_TIMEOUT:
                atomic_store_explicit(&sess->max_duration, SEC2USEC(attrs->lvalue), memory_order_release);
                break;
            case PW_ACCT_INTERIM_INTERVAL:
                atomic_store_explicit(&sess->acct_interval, SEC2USEC(attrs->lvalue), memory_order_release);
                break;
        }
        attrs = attrs->next;
    }

    if (likely(rules.have.user_id && rules.have.login)) {
        struct zclient *client = sess->client;

        client_db_find_or_set_id(zinst()->client_db, rules.user_id, &client);
        if (client != sess->client) {
            // found
            pthread_rwlock_wrlock(&sess->lock_client);
            atomic_fetch_add_explicit(&client->refcnt, 1, memory_order_relaxed);
            client_release(sess->client);
            sess->client = client;
            client_session_add(sess->client, sess);
            pthread_rwlock_unlock(&sess->lock_client);
        } else {
            client_apply_rules(sess->client, &rules);
        }

        atomic_fetch_sub_explicit(&zinst()->unauth_sessions_cnt, 1, memory_order_release);

        // log successful authentication
        {
            UT_string rules_str;
            utstring_init(&rules_str);
            utstring_reserve(&rules_str, 1024);

            attrs = response_attrs;
            while (likely(NULL != attrs)) {
                switch (attrs->attribute) {
                    case PW_FILTER_ID:
                        utstring_printf(&rules_str, " %s", attrs->strvalue);
                        break;
                    default:
                        break;
                }
                attrs = attrs->next;
            }

            zero_syslog(LOG_INFO, "Authenticated session %s (rules:%s)", ip_str, utstring_body(&rules_str));
            utstring_done(&rules_str);
        }
    } else {
        ret = OTHER_RC;
        ZERO_LOG(LOG_ERR, "Session authentication failed for %s (code:%d)", ip_str, ret);
    }

    end:
    crules_free(&rules);
    if (request_attrs) rc_avpair_free(request_attrs);
    if (response_attrs) rc_avpair_free(response_attrs);

    return ret;
}
Example #27
static inline void write_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
}
Example #28
/* Record a dropped event: count it and remember the most recent error code and
 * atom tag (relaxed ordering; these are only statistics). */
static void statsdNoteDrop(int error, int tag) {
    atomic_fetch_add_explicit(&dropped, 1, memory_order_relaxed);
    atomic_exchange_explicit(&log_error, error, memory_order_relaxed);
    atomic_exchange_explicit(&atom_tag, tag, memory_order_relaxed);
}
Example #29
static int statsdWrite(struct timespec* ts, struct iovec* vec, size_t nr) {
    ssize_t ret;
    int sock;
    static const unsigned headerLength = 1;
    struct iovec newVec[nr + headerLength];
    android_log_header_t header;
    size_t i, payloadSize;

    sock = atomic_load(&statsdLoggerWrite.sock);
    if (sock < 0) switch (sock) {
            case -ENOTCONN:
            case -ECONNREFUSED:
            case -ENOENT:
                break;
            default:
                return -EBADF;
        }
    /*
     *  struct {
     *      // what we provide to socket
     *      android_log_header_t header;
     *      // caller provides
     *      union {
     *          struct {
     *              char     prio;
     *              char     payload[];
     *          } string;
     *          struct {
     *              uint32_t tag
     *              char     payload[];
     *          } binary;
     *      };
     *  };
     */

    header.tid = gettid();
    header.realtime.tv_sec = ts->tv_sec;
    header.realtime.tv_nsec = ts->tv_nsec;

    newVec[0].iov_base = (unsigned char*)&header;
    newVec[0].iov_len = sizeof(header);

    // If we dropped events before, try to tell statsd.
    if (sock >= 0) {
        int32_t snapshot = atomic_exchange_explicit(&dropped, 0, memory_order_relaxed);
        if (snapshot) {
            android_log_event_long_t buffer;
            header.id = LOG_ID_STATS;
            // store the last log error in the tag field. This tag field is not used by statsd.
            buffer.header.tag = htole32(atomic_load(&log_error));
            buffer.payload.type = EVENT_TYPE_LONG;
            // format:
            // |atom_tag|dropped_count|
            int64_t composed_long = atomic_load(&atom_tag);
            // Send 2 int32's via an int64.
            composed_long = ((composed_long << 32) | ((int64_t)snapshot));
            buffer.payload.data = htole64(composed_long);

            newVec[headerLength].iov_base = &buffer;
            newVec[headerLength].iov_len = sizeof(buffer);

            ret = TEMP_FAILURE_RETRY(writev(sock, newVec, 2));
            if (ret != (ssize_t)(sizeof(header) + sizeof(buffer))) {
                atomic_fetch_add_explicit(&dropped, snapshot, memory_order_relaxed);
            }
        }
    }

    header.id = LOG_ID_STATS;

    for (payloadSize = 0, i = headerLength; i < nr + headerLength; i++) {
        newVec[i].iov_base = vec[i - headerLength].iov_base;
        payloadSize += newVec[i].iov_len = vec[i - headerLength].iov_len;

        if (payloadSize > LOGGER_ENTRY_MAX_PAYLOAD) {
            newVec[i].iov_len -= payloadSize - LOGGER_ENTRY_MAX_PAYLOAD;
            if (newVec[i].iov_len) {
                ++i;
            }
            break;
        }
    }

    /*
     * The write below could be lost, but will never block.
     *
     * ENOTCONN occurs if statsd has died.
     * ENOENT occurs if statsd is not running and socket is missing.
     * ECONNREFUSED occurs if we can not reconnect to statsd.
     * EAGAIN occurs if statsd is overloaded.
     */
    if (sock < 0) {
        ret = sock;
    } else {
        ret = TEMP_FAILURE_RETRY(writev(sock, newVec, i));
        if (ret < 0) {
            ret = -errno;
        }
    }
    switch (ret) {
        case -ENOTCONN:
        case -ECONNREFUSED:
        case -ENOENT:
            if (statd_writer_trylock()) {
                return ret; /* in a signal handler? try again when less stressed
                             */
            }
            __statsdClose(ret);
            ret = statsdOpen();
            statsd_writer_init_unlock();

            if (ret < 0) {
                return ret;
            }

            ret = TEMP_FAILURE_RETRY(writev(atomic_load(&statsdLoggerWrite.sock), newVec, i));
            if (ret < 0) {
                ret = -errno;
            }
        /* FALLTHRU */
        default:
            break;
    }

    if (ret > (ssize_t)sizeof(header)) {
        ret -= sizeof(header);
    }

    return ret;
}
Example #30
// 1. Figure out the actor index, creating one if it does not exist.
// 2. Check the info record for evnum/evterm data.
int sqlite3WalOpen(sqlite3_vfs *pVfs, sqlite3_file *pDbFd, const char *zWalName,int bNoShm, i64 mxWalSize, Wal **ppWal)
{
	MDB_val key, data;
	int rc;

	#if ATOMIC
	db_thread *thr 		= g_tsd_thread;
	db_connection *conn = g_tsd_conn;
	#else
	db_thread* thr 	  = enif_tsd_get(g_tsd_thread);
	db_connection* conn = enif_tsd_get(g_tsd_conn);
	#endif
	mdbinf * const mdb 	= &thr->mdb;
	Wal *pWal = &conn->wal;
	MDB_dbi actorsdb, infodb;
	MDB_txn *txn = mdb->txn;
	int offset = 0, cutoff = 0, nmLen = 0;

	actorsdb = mdb->actorsdb;
	infodb = mdb->infodb;

	if (zWalName[0] == '/')
		offset = 1;
	nmLen = strlen(zWalName+offset);
	if (zWalName[offset+nmLen-1] == 'l' && zWalName[offset+nmLen-2] == 'a' && 
		zWalName[offset+nmLen-3] == 'w' && zWalName[offset+nmLen-4] == '-')
		cutoff = 4;

	DBG("Wal name=%s %lld",zWalName,(i64)txn);

	// shorten size to ignore "-wal" at the end
	key.mv_size = nmLen-cutoff;
	key.mv_data = (void*)(zWalName+offset);//thr->curConn->dbpath;
	rc = mdb_get(txn,actorsdb,&key,&data);

	// This is new actor, assign an index
	if (rc == MDB_NOTFOUND)
	{
		i64 index = 0;
		// MDB_val key1 = {1,(void*)"?"};
		#ifndef _TESTAPP_
		qitem *item;
		db_command *cmd;

		item = command_create(conn->wthreadind,-1,g_pd);
		cmd = (db_command*)item->cmd;
		cmd->type = cmd_actorsdb_add;

		#if ATOMIC
		index = atomic_fetch_add_explicit(&g_pd->actorIndexes[thr->nEnv], 1, memory_order_relaxed);
		#else
		enif_mutex_lock(g_pd->actorIndexesMtx[thr->nEnv]);
		index = g_pd->actorIndexes[thr->nEnv]++;
		enif_mutex_unlock(g_pd->actorIndexesMtx[thr->nEnv]);
		#endif
		pWal->index = index;

		cmd->arg = enif_make_string(item->env,zWalName,ERL_NIF_LATIN1);
		cmd->arg1 = enif_make_uint64(item->env, index);
		push_command(conn->wthreadind, -1, g_pd, item);
		
		#else
		if (thr->isreadonly)
		{
			return SQLITE_ERROR;
		}
		else
		{
			#if ATOMIC
			char filename[MAX_PATHNAME];
			sprintf(filename,"%.*s",(int)(nmLen-cutoff),zWalName+offset);
			index = atomic_fetch_add_explicit(&g_pd->actorIndexes[thr->nEnv], 1, memory_order_relaxed);
			pWal->index = index;
			if (register_actor(index, filename) != SQLITE_OK)
				return SQLITE_ERROR;
			#else
			return SQLITE_ERROR;
			#endif
		}
		#endif
	}
	// Actor exists, read evnum/evterm info
	else if (rc == MDB_SUCCESS)
	{
		// data contains index
		key = data;
		pWal->index = *(i64*)data.mv_data;
		DBG("Actor at index %lld",pWal->index);
		rc = mdb_get(txn,infodb,&key,&data);

		if (rc == MDB_SUCCESS)
		{
			if (*(u8*)data.mv_data != 1)
			{
				return SQLITE_ERROR;
			}
			memcpy(&pWal->firstCompleteTerm, ((u8*)data.mv_data)+1, sizeof(u64));
			memcpy(&pWal->firstCompleteEvnum,((u8*)data.mv_data)+1+sizeof(u64), sizeof(u64));
			memcpy(&pWal->lastCompleteTerm,  ((u8*)data.mv_data)+1+sizeof(u64)*2, sizeof(u64));
			memcpy(&pWal->lastCompleteEvnum, ((u8*)data.mv_data)+1+sizeof(u64)*3, sizeof(u64));
			memcpy(&pWal->inProgressTerm,    ((u8*)data.mv_data)+1+sizeof(u64)*4, sizeof(u64));
			memcpy(&pWal->inProgressEvnum,   ((u8*)data.mv_data)+1+sizeof(u64)*5, sizeof(u64));
			memcpy(&pWal->mxPage,   ((u8*)data.mv_data)+1+sizeof(u64)*6,            sizeof(u32));
			memcpy(&pWal->allPages, ((u8*)data.mv_data)+1+sizeof(u64)*6+sizeof(u32),sizeof(u32));
			pWal->readSafeTerm = pWal->lastCompleteTerm;
			pWal->readSafeEvnum = pWal->lastCompleteEvnum;
			pWal->readSafeMxPage = pWal->mxPage;

			// if (pWal->inProgressTerm != 0)
			// {
			// 	doundo(pWal,NULL,NULL,1);
			// }
		}
		else if (rc == MDB_NOTFOUND)
		{
			// DBG("Info for actor not found"));
		}
	}
	else
	{
		DBG("Error open=%d",rc);
		thr->forceCommit = 2;
		return SQLITE_ERROR;
	}

	conn->changed = 1;
	if (ppWal != NULL)
		(*ppWal) = pWal;
	return SQLITE_OK;
}
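In the #if ATOMIC branches above, a mutex-protected counter (actorIndexesMtx plus ++) is replaced by a single relaxed fetch_add; both hand out unique, monotonically increasing actor indexes. Stripped of the surrounding code, the pattern is just the following (names are placeholders, not from the original project):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t next_index;

/* Hand out a unique id. Relaxed ordering is enough because uniqueness comes
 * from the atomic read-modify-write itself, not from ordering against other data. */
static int64_t alloc_index(void)
{
	return atomic_fetch_add_explicit(&next_index, 1, memory_order_relaxed);
}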