KOKKOS_INLINE_FUNCTION
  void operator()( size_type i ) const
  {
    const size_type invalid_index = map_type::invalid_index;

    uint32_t length = 0;
    size_type min_index = ~0u, max_index = 0;
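    // Walk the collision chain of hash list i, recording its length and
    // the smallest/largest entry indices it touches.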
    for (size_type curr = m_map.m_hash_lists(i); curr != invalid_index; curr = m_map.m_next_index[curr]) {
      ++length;
      min_index = (curr < min_index) ? curr : min_index;
      max_index = (max_index < curr) ? curr : max_index;
    }

    size_type distance = (0u < length) ? max_index - min_index : 0u;
    size_type blocks = (0u < length) ? max_index/32u - min_index/32u : 0u;

    // clamp each statistic to the histogram bins [0, 99]
    length   = length   < 100u ? length   : 99u;
    distance = distance < 100u ? distance : 99u;
    blocks   = blocks   < 100u ? blocks   : 99u;

    if (0u < length)
    {
      atomic_fetch_add( &m_length(length), 1);
      atomic_fetch_add( &m_distance(distance), 1);
      atomic_fetch_add( &m_block_distance(blocks), 1);
    }
  }
Example #2
void
test_add ()
{
  v = 0;
  count = 1;

  atomic_fetch_add (&v, count);
  if (v != 1)
    abort ();

  atomic_fetch_add_explicit (&v, count, memory_order_consume);
  if (v != 2)
    abort ();

  atomic_fetch_add (&v, 1);
  if (v != 3)
    abort ();

  atomic_fetch_add_explicit (&v, 1, memory_order_release);
  if (v != 4)
    abort ();

  atomic_fetch_add (&v, 1);
  if (v != 5)
    abort ();

  atomic_fetch_add_explicit (&v, count, memory_order_seq_cst);
  if (v != 6)
    abort ();
}
Example #3
/*******************************************************************************
 * Run tests
 ******************************************************************************/
int main(int argc, char **argv)
{
	char *test_name   = "---gnu_gcc_atomics---";
	char *csv_header  = ":functionName: runtime(ns)";
	char *csv_path	  = "./builtin_atomic_tests.csv"; // Touch to create
	ct_info_t *info;
	uint64_t iters, start_time, stop_time; // ~4*10^6 iterations

	info = ct_init_info(test_name, csv_path);

	if (argc != 2) {
		iters = 1 << 22;
	} else {
		iters = strtoull(argv[1], NULL, 10);
	}

	ct_add_line(info, "IterationsOfEachFunction:%lu\n", iters);
	ct_add_line(info, "%-s\n", csv_header, FIELD_WIDTH);

	RUN_TIMED_TEST("C_postfix_increment", POST_FIX_INCREMENT, iters);
	RUN_TIMED_TEST("atomic_init", INIT, iters);
	RUN_TIMED_TEST("atomic_thread_fence", THREAD_FENCE, iters);
	RUN_TIMED_TEST("atomic_signal_fence", SIGNAL_FENCE, iters);
	RUN_TIMED_TEST("atomic_is_lock_free", IS_LOCK_FREE, iters);
	RUN_TIMED_TEST("atomic_store_explicit", STORE_EXPLICIT, iters);
	RUN_TIMED_TEST("atomic_store", STORE, iters);
	RUN_TIMED_TEST("atomic_load_explicit", LOAD_EXPLICIT, iters);
	RUN_TIMED_TEST("atomic_load", LOAD, iters);
	RUN_TIMED_TEST("atomic_exchange_explicit", EXCHANGE_EXPLICIT, iters);
	RUN_TIMED_TEST("atomic_exchange", EXCHANGE, iters);
	/* RUN_TIMED_TEST("atomic_compare_exchange_strong_explicit", */
	/* 	       COMPARE_EXCHANGE_STRONG_EXPLICIT, iters); */
	/* RUN_TIMED_TEST("atomic_compare_exchange_strong", */
	/* 	       COMPARE_EXCHANGE_STRONG, iters); */
	/* RUN_TIMED_TEST("atomic_compare_exchange_weak_explicit", */
	/* 	       COMPARE_EXCHANGE_WEAK_EXPLICIT, iters); */
	/* RUN_TIMED_TEST("atomic_compare_exchange_weak", COMPARE_EXCHANGE_WEAK, */
	/* 	       iters); */
	RUN_TIMED_TEST("atomic_fetch_add", FETCH_ADD, iters);
	RUN_TIMED_TEST("atomic_fetch_add_explicit", FETCH_ADD_EXPLICIT, iters);
	RUN_TIMED_TEST("atomic_fetch_sub", FETCH_SUB, iters);
	RUN_TIMED_TEST("atomic_fetch_sub_explicit", FETCH_SUB_EXPLICIT, iters);
	RUN_TIMED_TEST("atomic_fetch_or", FETCH_OR, iters);
	RUN_TIMED_TEST("atomic_fetch_or_explicit", FETCH_OR_EXPLICIT, iters);
	RUN_TIMED_TEST("atomic_fetch_xor", FETCH_XOR, iters);
	RUN_TIMED_TEST("atomic_fetch_xor_explicit", FETCH_XOR_EXPLICIT, iters);
	RUN_TIMED_TEST("atomic_fetch_and", FETCH_AND, iters);
	RUN_TIMED_TEST("atomic_fetch_and_explicit", FETCH_AND_EXPLICIT, iters);
	RUN_TIMED_TEST("atomic_flag_test_and_set", FLAG_TEST_AND_SET, iters);
	RUN_TIMED_TEST("atomic_flag_test_and_set_explicit",
		       FLAG_TEST_AND_SET_EXPLICIT, iters);
	RUN_TIMED_TEST("atomic_flag_clear", FLAG_CLEAR, iters);
	RUN_TIMED_TEST("atomic_flag_clear_explicit", FLAG_CLEAR_EXPLICIT, iters);

	atomic_init(&dummy_atomic_var, atomic_fetch_add(&atomic_var, 0));
	printf("dummy_atomic_var = %d\n", atomic_fetch_add(&dummy_atomic_var, 0));

	ct_fini_info(info);
}
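The otherwise-unused start_time and stop_time locals above are consumed by the RUN_TIMED_TEST macro, which is defined elsewhere in this repo. Purely as a hypothetical sketch of what such a macro could expand to (ct_monotonic_ns, the report format, and the assumption that each OP argument is a function-like macro are all guesses, not the repo's actual code):

#define RUN_TIMED_TEST(name, OP, iter_count)                              \
	do {                                                              \
		start_time = ct_monotonic_ns(); /* hypothetical timer */  \
		for (uint64_t _i = 0; _i < (iter_count); _i++)            \
			OP();                                             \
		stop_time = ct_monotonic_ns();                            \
		ct_add_line(info, "%s: %lu\n", (name),                    \
			    (unsigned long)((stop_time - start_time) / (iter_count))); \
	} while (0)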
Example #4
File: dec.c Project: etix/vlc
/*****************************************************************************
 * aout_DecPlay : filter & mix the decoded buffer
 *****************************************************************************/
int aout_DecPlay (audio_output_t *aout, block_t *block, int input_rate)
{
    aout_owner_t *owner = aout_owner (aout);

    assert (input_rate >= INPUT_RATE_DEFAULT / AOUT_MAX_INPUT_RATE);
    assert (input_rate <= INPUT_RATE_DEFAULT * AOUT_MAX_INPUT_RATE);
    assert (block->i_pts >= VLC_TS_0);

    block->i_length = CLOCK_FREQ * block->i_nb_samples
                                 / owner->input_format.i_rate;

    aout_OutputLock (aout);
    int ret = aout_CheckReady (aout);
    if (unlikely(ret == AOUT_DEC_FAILED))
        goto drop; /* Pipeline is unrecoverably broken :-( */

    const mtime_t now = mdate (), advance = block->i_pts - now;
    if (advance < -AOUT_MAX_PTS_DELAY)
    {   /* Late buffer can be caused by bugs in the decoder, by scheduling
         * latency spikes (excessive load, SIGSTOP, etc.) or if buffering is
         * insufficient. We assume the PTS is wrong and play the buffer anyway:
         * Hopefully video has encountered a similar PTS problem as audio. */
        msg_Warn (aout, "buffer too late (%"PRId64" us): dropped", advance);
        goto drop;
    }
    if (advance > AOUT_MAX_ADVANCE_TIME)
    {   /* Early buffers can only be caused by bugs in the decoder. */
        msg_Err (aout, "buffer too early (%"PRId64" us): dropped", advance);
        goto drop;
    }
    if (block->i_flags & BLOCK_FLAG_DISCONTINUITY)
        owner->sync.discontinuity = true;

    block = aout_FiltersPlay (owner->filters, block, input_rate);
    if (block == NULL)
        goto lost;

    /* Software volume */
    aout_volume_Amplify (owner->volume, block);

    /* Drift correction */
    aout_DecSynchronize (aout, block->i_pts, input_rate);

    /* Output */
    owner->sync.end = block->i_pts + block->i_length + 1;
    owner->sync.discontinuity = false;
    aout_OutputPlay (aout, block);
    atomic_fetch_add(&owner->buffers_played, 1);
out:
    aout_OutputUnlock (aout);
    return ret;
drop:
    owner->sync.discontinuity = true;
    block_Release (block);
lost:
    atomic_fetch_add(&owner->buffers_lost, 1);
    goto out;
}
Example #5
// Reserves a free slot in ReserveBMap
qitem* queue_get_item(queue *q)
{
	int i;
	qitem *buf        = (qitem*)(q->buf + q->mapbytes*2);
	atomic_llong *map = (atomic_llong*)q->buf;
	int zbit;
	qitem *r;

	while(1)
	{
		// Reserve space
		for (i = 0; i < q->map_elements; i++)
		{
			long long int mval = atomic_load(&map[i]);
			long long int nval;

			// if no zero bit go to next element
			if (!(zbit = ffz(mval)))
			{
				continue;
			}

			nval = mval | (((long long int)1) << (--zbit));

			// update map val with bit set. 
			// If successful we are done.
			if (atomic_compare_exchange_strong(&map[i], &mval, nval))
			{
				// printf("ZBIT %d %lld %lld\n",zbit,mval, sizeof(mval));
				atomic_fetch_add(&q->size, 1);
				break;
			}
			// Unable to exchange, go again for same index. 
			else
			{
				atomic_fetch_add(&q->ct, 1);
				i--;
			}
		}

		if (i < q->map_elements)
		{
			r = &buf[i*64+zbit];
			break;
		}
		else
		{
			usleep(100);
		}
	}
	return r;
}
Example #6
static void *Worker( void *arg ) {
    TYPE id = (size_t)arg;
	uint64_t entry;
#ifdef FAST
	unsigned int cnt = 0, oid = id;
#endif // FAST

    for ( int r = 0; r < RUNS; r += 1 ) {
        entry = 0;
        while ( atomic_load(&stop) == 0 ) {
            atomic_store(&states[id*PADRATIO], LOCKED);
            while (1) {
                int lturn = atomic_load(&turn);
                if (!validate_left(id, lturn)) {
                    atomic_store(&states[id*PADRATIO], WAITING);
                    while (1) {
                        if (validate_left(id, lturn) && lturn == atomic_load_explicit(&turn, memory_order_acquire)) break;
                        Pause();
                        lturn = atomic_load_explicit(&turn, memory_order_acquire);
                    }
                    atomic_store(&states[id*PADRATIO], LOCKED);
                    continue;
                }
                while (lturn == atomic_load_explicit(&turn, memory_order_acquire)) {
                    if (validate_right(id, lturn)) break;
                    Pause();
                }
                if (lturn == atomic_load_explicit(&turn, memory_order_acquire)) break;
            }
			CriticalSection( id );						// critical section
			int lturn = (atomic_load_explicit(&turn, memory_order_relaxed)+1) % N;
			atomic_store_explicit(&turn, lturn, memory_order_relaxed);
			atomic_store_explicit(&states[id*PADRATIO], UNLOCKED, memory_order_release); // exit protocol
#ifdef FAST
			id = startpoint( cnt );						// different starting point each experiment
			cnt = cycleUp( cnt, NoStartPoints );
#endif // FAST
			entry += 1;
		} // while
#ifdef FAST
		id = oid;
#endif // FAST
		entries[r][id] = entry;
        atomic_fetch_add( &Arrived, 1 );
        while ( atomic_load(&stop) != 0 ) Pause();
        atomic_fetch_add( &Arrived, -1 );
	} // for
	return NULL;
} // Worker
Example #7
static void ffmmal_stop_decoder(AVCodecContext *avctx)
{
    MMALDecodeContext *ctx = avctx->priv_data;
    MMAL_COMPONENT_T *decoder = ctx->decoder;
    MMAL_BUFFER_HEADER_T *buffer;

    mmal_port_disable(decoder->input[0]);
    mmal_port_disable(decoder->output[0]);
    mmal_port_disable(decoder->control);

    mmal_port_flush(decoder->input[0]);
    mmal_port_flush(decoder->output[0]);
    mmal_port_flush(decoder->control);

    while ((buffer = mmal_queue_get(ctx->queue_decoded_frames)))
        mmal_buffer_header_release(buffer);

    while (ctx->waiting_buffers) {
        FFBufferEntry *buffer = ctx->waiting_buffers;

        ctx->waiting_buffers = buffer->next;

        if (buffer->flags & MMAL_BUFFER_HEADER_FLAG_FRAME_END)
            atomic_fetch_add(&ctx->packets_buffered, -1);

        av_buffer_unref(&buffer->ref);
        av_free(buffer);
    }
    ctx->waiting_buffers_tail = NULL;

    av_assert0(atomic_load(&ctx->packets_buffered) == 0);

    ctx->frames_output = ctx->eos_received = ctx->eos_sent = ctx->packets_sent = ctx->extradata_sent = 0;
}
Example #8
void mm_physical_increment_count(addr_t page)
{
	if (!frames)
		return;
	if ((page / PAGE_SIZE) < maximum_page_number)
		atomic_fetch_add(&frames[page / PAGE_SIZE].count, 1);
}
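The matching decrement would presumably mirror this with atomic_fetch_sub; a hypothetical sketch (mm_physical_decrement_count is an assumed name that does not appear in the source shown here):

void mm_physical_decrement_count(addr_t page)
{
	if (!frames)
		return;
	if ((page / PAGE_SIZE) < maximum_page_number)
		atomic_fetch_sub(&frames[page / PAGE_SIZE].count, 1);
}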
Example #9
ring_buffer_size_t PaUtil_AdvanceRingBufferReadIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount )
{
    /* ensure that previous reads (copies out of the ring buffer) are always completed before updating (writing) the read index.
       (write-after-read) => full barrier
    */
    return (atomic_fetch_add(&rbuf->readIndex, elementCount) + elementCount) & rbuf->bigMask;
}
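Since no memory_order argument is given, the atomic_fetch_add above defaults to memory_order_seq_cst, which is what provides the "full barrier" the comment asks for. A minimal, self-contained C11 sketch of the same write-after-read publication pattern (all names here are illustrative, not PortAudio's):

#include <stdatomic.h>
#include <string.h>

static char payload[64];
static _Atomic long readIndex;

void consume_and_advance(long n)
{
    char local[64];
    memcpy(local, payload, (size_t)n);  /* read out of the buffer first... */
    (void)local;
    /* ...then the seq_cst read-modify-write publishes the new read index;
       the copy above cannot be reordered past it. */
    atomic_fetch_add(&readIndex, n);
}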
Example #10
File: rat.c Project: esoma/rat
RatResult
rat_pool_put(RatPool* pool, RatFunction function, void* arg)
{
    RAT_ASSERT(pool);
    RAT_ASSERT(function);
    RatResult result;
    // acquire the pool lock
    result = rat_lock_acquire(pool->lock);
    RAT_ASSERT(result == RAT_RESULT_SUCCESS);
    // put the work in the pool list
    atomic_fetch_add(&pool->total_work, 1);
    RatResult put_result = rat_work_put(
        &pool->work,
        &pool->free_work,
        function,
        arg
    );
    // wake the pool thread
    if (put_result == RAT_RESULT_SUCCESS)
    {
        result = rat_condition_signal(pool->wake_condition);
        RAT_ASSERT(result == RAT_RESULT_SUCCESS);
    }
    // release the pool lock
    result = rat_lock_release(pool->lock);
    RAT_ASSERT(result == RAT_RESULT_SUCCESS);
    return put_result;
}
Example #11
input_item_t *input_item_Hold( input_item_t *p_item )
{
    input_item_owner_t *owner = item_owner(p_item);

    atomic_fetch_add( &owner->refs, (atomic_uint)1 );			// sunqueen modify
    return p_item;
}
Example #12
ring_buffer_size_t PaUtil_AdvanceRingBufferWriteIndex( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount )
{
    /* ensure that previous writes are seen before we update the write index
       (write after write)
    */
    return (atomic_fetch_add(&rbuf->writeIndex, elementCount) + elementCount) & rbuf->bigMask;
}
Example #13
struct udev *
_udev_ref(struct udev *udev)
{

	atomic_fetch_add(&udev->refcount, 1);
	return udev;
}
Example #14
// Sets UsedBMap to signal consumer it can be used.
int queue_push(queue *q, qitem* item)
{
	qitem *buf        = (qitem*)(q->buf + q->mapbytes*2);
	atomic_llong *map = (atomic_llong*)(q->buf + q->mapbytes);
	const int diff    = (item - buf);
	const int i       = diff / 64;
	const int zbit    = diff % 64;

	while (1)
	{
		long long int mval = atomic_load(&map[i]);
		long long int nval;

		nval = mval | (((long long int)1) << zbit);

		if (atomic_compare_exchange_strong(&map[i], &mval, nval))
		{
			// printf("PUSHING ON POS i=%d zbit=%d diff=%d rdiff=%lld\n",i, zbit, diff, item-buf);
			break;
		}
		else
			atomic_fetch_add(&q->ct, 1);

		usleep(DELAY_BY);
	}
	return 1;
}
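Taken together with queue_get_item (Example #5), the two bitmaps split the hand-off in half: the producer reserves a slot by setting a bit in the first map (ReserveBMap), fills the qitem, then publishes it by setting the same bit in the second map (UsedBMap); a consumer would presumably scan UsedBMap the same way and clear both bits once the item is drained.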
Example #15
void
test_fetch_add ()
{
  v = 0;
  count = 1;

  if (atomic_fetch_add_explicit (&v, count, memory_order_relaxed) != 0)
    abort ();

  if (atomic_fetch_add_explicit (&v, 1, memory_order_consume) != 1)
    abort ();

  if (atomic_fetch_add_explicit (&v, count, memory_order_acquire) != 2)
    abort ();

  if (atomic_fetch_add_explicit (&v, 1, memory_order_release) != 3)
    abort ();

  if (atomic_fetch_add_explicit (&v, count, memory_order_acq_rel) != 4)
    abort ();

  if (atomic_fetch_add_explicit (&v, 1, memory_order_seq_cst) != 5)
    abort ();

  if (atomic_fetch_add (&v, 1) != 6)
    abort ();
}
Example #16
void cpu_interrupt_irq_entry(struct registers *regs, int int_no)
{
	cpu_interrupt_set(0);
	atomic_fetch_add_explicit(&interrupt_counts[int_no], 1, memory_order_relaxed);
	int already_in_kernel = 0;
	if(!current_thread->regs)
		current_thread->regs = regs;
	else
		already_in_kernel = 1;
	atomic_fetch_add(&current_thread->interrupt_level, 1);
	/* Now, run through the stage1 handlers, and see if we need any
	 * stage2 handlers to run later. */
	int s1started = timer_start(&interrupt_timers[int_no]);
	for(int i = 0; i < MAX_HANDLERS; i++)
	{
		if(interrupt_handlers[int_no][i].fn)
			(interrupt_handlers[int_no][i].fn)(regs, int_no, 0);
	}
	if(s1started)
		timer_stop(&interrupt_timers[int_no]);
	cpu_interrupt_set(0);
	atomic_fetch_sub(&current_thread->interrupt_level, 1);
	if(!already_in_kernel) {
		__setup_signal_handler(regs);
		current_thread->regs = 0;
		ASSERT(!current_thread || !current_process || (current_process == kernel_process) || current_thread->held_locks == 0 || (current_thread->flags & THREAD_KERNEL));
	}
}
Example #17
/* Called from render callbacks. No lock, wait, and IO here */
void
ca_Render(audio_output_t *p_aout, uint8_t *p_output, size_t i_requested)
{
    struct aout_sys_common *p_sys = (struct aout_sys_common *) p_aout->sys;

    if (atomic_load_explicit(&p_sys->b_paused, memory_order_relaxed))
    {
        memset(p_output, 0, i_requested);
        return;
    }

    /* Pull audio from buffer */
    int32_t i_available;
    void *p_data = TPCircularBufferTail(&p_sys->circular_buffer,
                                        &i_available);
    if (i_available < 0)
        i_available = 0;

    size_t i_tocopy = __MIN(i_requested, (size_t) i_available);

    if (i_tocopy > 0)
    {
        memcpy(p_output, p_data, i_tocopy);
        TPCircularBufferConsume(&p_sys->circular_buffer, i_tocopy);
    }

    /* Pad with 0 */
    if (i_requested > i_tocopy)
    {
        atomic_fetch_add(&p_sys->i_underrun_size, i_requested - i_tocopy);
        memset(&p_output[i_tocopy], 0, i_requested - i_tocopy);
    }
}
Example #18
picture_t *picture_pool_Wait(picture_pool_t *pool)
{
    unsigned i;

    vlc_mutex_lock(&pool->lock);
    assert(pool->refs > 0);

    while (pool->available == 0)
        vlc_cond_wait(&pool->wait, &pool->lock);

    i = ffsll(pool->available);
    assert(i > 0);
    pool->available &= ~(1ULL << (i - 1));
    vlc_mutex_unlock(&pool->lock);

    picture_t *picture = pool->picture[i - 1];

    if (pool->pic_lock != NULL && pool->pic_lock(picture) != 0) {
        vlc_mutex_lock(&pool->lock);
        pool->available |= 1ULL << (i - 1);
        vlc_cond_signal(&pool->wait);
        vlc_mutex_unlock(&pool->lock);
        return NULL;
    }

    picture_t *clone = picture_pool_ClonePicture(pool, i - 1);
    if (clone != NULL) {
        assert(clone->p_next == NULL);
        atomic_fetch_add(&pool->refs, 1);
    }
    return clone;
}
Example #19
void net_notify_packet_ready(struct net_dev *nd)
{
	atomic_fetch_add(&nd->rx_pending, 1);
	if(nd->callbacks->poll)
		tm_thread_resume(nd->rec_thread.thread);
	/* TODO: notify CPU to schedule this process NOW */
}
Example #20
void picture_pool_Delete(picture_pool_t *pool)
{
    for (int i = 0; i < pool->picture_count; i++) {
        picture_t *picture = pool->picture[i];
        if (pool->master) {
            for (int j = 0; j < pool->master->picture_count; j++) {
                if (pool->master->picture[j] == picture)
                    pool->master->picture_reserved[j] = false;
            }
        } else {
            picture_gc_sys_t *gc_sys = picture->gc.p_sys;

            assert(!pool->picture_reserved[i]);

            /* Restore the original garbage collector */
            if (atomic_fetch_add(&picture->gc.refcount, 1) == 0)
            {   /* Simple case: the picture is not locked, destroy it now. */
                picture->gc.pf_destroy = gc_sys->destroy;
                picture->gc.p_sys      = gc_sys->destroy_sys;
                free(gc_sys);
            }
            else /* Intricate case: the picture is still locked and the gc
                    cannot be modified (w/o memory synchronization). */
                atomic_store(&gc_sys->zombie, true);

            picture_Release(picture);
        }
    }
    free(pool->picture_reserved);
    free(pool->picture);
    free(pool);
}
Example #21
picture_t *picture_pool_Get(picture_pool_t *pool)
{
    vlc_mutex_lock(&pool->lock);
    assert(pool->refs > 0);

    for (unsigned i = ffsll(pool->available); i; i = fnsll(pool->available, i))
    {
        pool->available &= ~(1ULL << (i - 1));
        vlc_mutex_unlock(&pool->lock);

        picture_t *picture = pool->picture[i - 1];

        if (pool->pic_lock != NULL && pool->pic_lock(picture) != 0) {
            vlc_mutex_lock(&pool->lock);
            pool->available |= 1ULL << (i - 1);
            continue;
        }

        picture_t *clone = picture_pool_ClonePicture(pool, i - 1);
        if (clone != NULL) {
            assert(clone->p_next == NULL);
            atomic_fetch_add(&pool->refs, 1);
        }
        return clone;
    }

    vlc_mutex_unlock(&pool->lock);
    return NULL;
}
Example #22
/**
 * Create a stream
 *
 * Allocate and initialize memory for a stream.
 *
 * @return pointer to the created stream
 */
lpel_stream_t *LpelStreamCreate(int size)
{
  assert( size >= 0);
  if (0==size) size = STREAM_BUFFER_SIZE;

  lpel_stream_t *s;
  s = LpelWorkerGetStream();		// try to get from the free list first
  if (s == NULL) {
  	s = (lpel_stream_t *) malloc( sizeof(lpel_stream_t) );		// allocate if fail
  	LpelBufferInit( &s->buffer, size);
  }

  assert(LpelBufferIsEmpty(&s->buffer));

  s->uid = atomic_fetch_add( &stream_seq, 1);
  PRODLOCK_INIT( &s->prod_lock );
  atomic_init( &s->n_sem, 0);
  atomic_init( &s->e_sem, size);
  s->is_poll = 0;
  s->prod_sd = NULL;
  s->cons_sd = NULL;
  s->usr_data = NULL;
  s->type = LPEL_STREAM_MIDDLE;
  s->next = NULL;
  s->read_cnt = 0;
  s->write_cnt = 0;
  return s;
}
Example #23
/**
 * Blocking, consuming read from a stream
 *
 * If the stream is empty, the task is suspended until
 * a producer writes an item to the stream.
 *
 * @param sd  stream descriptor
 * @return    the next item of the stream
 * @pre       current task is single reader
 */
void *LpelStreamRead( lpel_stream_desc_t *sd)
{
  void *item;
  lpel_task_t *self = sd->task;

  assert( sd->mode == 'r');

  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_readprepare)) {
    MON_CB(stream_readprepare)(sd->mon);
  }
#endif

  /* quasi P(n_sem) */
  if ( atomic_fetch_sub( &sd->stream->n_sem, 1) == 0) {

#ifdef USE_TASK_EVENT_LOGGING
    /* MONITORING CALLBACK */
    if (sd->mon && MON_CB(stream_blockon)) {
      MON_CB(stream_blockon)(sd->mon);
    }
#endif

    /* wait on stream: */
    LpelTaskBlockStream( self);
  }


  /* read the top element */
  item = LpelBufferTop( &sd->stream->buffer);
  assert( item != NULL);
  /* pop off the top element */
  LpelBufferPop( &sd->stream->buffer);


  /* quasi V(e_sem) */
  if ( atomic_fetch_add( &sd->stream->e_sem, 1) < 0) {
    /* e_sem was -1 */
    lpel_task_t *prod = sd->stream->prod_sd->task;
    /* wakeup producer: make ready */
    LpelTaskUnblock( self, prod);

    /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
    if (sd->mon && MON_CB(stream_wakeup)) {
      MON_CB(stream_wakeup)(sd->mon);
    }
#endif

  }

  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_readfinish)) {
    MON_CB(stream_readfinish)(sd->mon, item);
  }
#endif
  return item;
}
Example #24
  void report(std::thread::id tid, const char* format, ...) {
    Report report;
    report.time = chrono::duration_cast<chrono::duration<double>>(
        chrono::high_resolution_clock::now() - logStartTime).count();

    if (queueThreadMap.find(tid) == queueThreadMap.end()) {

      // this makes n and queueThreadMap thread-safe
      //
      queueThreadMapMutex.lock();
      queueThreadMap[tid] = atomic_fetch_add(&n, 1);
      queueThreadMapMutex.unlock();

      assert(n < NUMBER_OF_QUEUES);
    }

    int q = queueThreadMap[tid];

    // Use vsnprintf so long user-supplied strings are truncated instead
    // of overflowing the buffer (the original vsprintf was a known bug).
    char buffer[256];
    va_list args;
    va_start(args, format);
    vsnprintf(buffer, sizeof(buffer), format, args);
    // perror(buffer);
    va_end(args);

    snprintf(report.text, REPORT_LENGTH, "%.8lf|%d|%s", report.time, q, buffer);
    queue[q].push(report);
  }
Example #25
input_item_t *input_item_Hold( input_item_t *p_item )
{
    input_item_owner_t *owner = item_owner(p_item);

    atomic_fetch_add( &owner->refs, 1 );
    return p_item;
}
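A simplified sketch of the release side of this refcount, for contrast (VLC's real input_item_Release also tears down the item's contents before freeing, which is omitted here):

void input_item_Release( input_item_t *p_item )
{
    input_item_owner_t *owner = item_owner(p_item);

    /* Only the thread that drops the last reference may destroy: */
    if( atomic_fetch_sub( &owner->refs, 1 ) != 1 )
        return;

    free( owner );
}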
Example #26
/*
 *  This function provides the calculated blocktime per vCPU and traces it.
 *  Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 *
 * Assume we have 3 CPU
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match the condition, because the sequence S1,S2,E1 doesn't include CPU3
 * S3,S1,E2 - sequence includes all CPUs, in this case overlap will be S1,E2 -
 *            it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /* Look up the CPU in order to clear it. This algorithm looks
     * straightforward, but it's not optimal: a better algorithm would
     * keep a tree or hash where the key is an address and the value is
     * a list of vCPUs. */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        atomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /* We need to know whether this mark was due to a faulted page;
         * the other possible case is a prefetched page, and in that
         * case we shouldn't be here. */
        if (!vcpu_total_blocktime &&
            atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue cycle, due to one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    atomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - atomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}
Example #27
static int atomic_hash_table_update_one_file(english_word *word){
  uint64_t hashv=fnv_hash(word->str,word->len);
  uint64_t index=hashv%global_hash_table_size;
  //this next line results in a lot of cache misses
  //for obvious reasons
  if(!global_hash_table[index]){//word isn't in the hash table, add it
    uint8_t *mem=xmalloc(word->len);
    word->str=(char*)my_strcpy(mem,(uint8_t*)word->str,word->len);
    void *prev=global_hash_table[index];
    int test=atomic_compare_exchange_n(global_hash_table+index,&prev,word);
    if(test){
      //we added the word
      //this needs to be atomic to prevent two threads writing different
      //values to the same index of indices
      uint64_t old_indices_index=atomic_fetch_add(&indices_index,1);
      //this doesn't need to be atomic, since indices_index will never be
      //decremented, so no one else will change this
      hash_table_indices[old_indices_index]=index;
      goto end1;
    }
    //else, someone else changed the value of global_hash_table[index] before us
  }
  while(1){
    do {
      //see if the value in the table is the same as our value
      //if so update the value already in the table
      if(string_compare(global_hash_table[index],word)){
        //atomically increment word count
        atomic_add(&global_hash_table[index]->count,1);
        goto end0;
      }
    } while(global_hash_table[++index]);
    //not in the table use next free index (if we can)
    void *prev=global_hash_table[index];
    int test=atomic_compare_exchange_n(global_hash_table+index,&prev,word);
    if(test){
      uint64_t old_indices_index=atomic_fetch_add(&indices_index,1);
      hash_table_indices[old_indices_index]=index;
      goto end1;
    }
    //if !test the compare exchange failed and we need to keep looping
  }
 end0:
  return 0;
 end1:
  return 1;
}
Example #28
/*
 * Locks the mutex
 * Progress Condition: Blocking
 */
void ticket_mutex_lock(ticket_mutex_t * self)
{
    long lingress = atomic_fetch_add(&self->ingress, 1);
    while (lingress != atomic_load(&self->egress)) {
        sched_yield();  // Replace this with thrd_yield() if you use <threads.h>
    }
    // This thread has acquired the lock on the mutex
}
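The release side is the wait-free counterpart: advancing egress hands the lock to the thread holding the next ticket. A minimal sketch, assuming the same ticket_mutex_t layout:

/*
 * Unlocks the mutex
 * Progress Condition: Wait-Free
 */
void ticket_mutex_unlock(ticket_mutex_t * self)
{
    atomic_fetch_add(&self->egress, 1);
}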
Example #29
/* increase a refcount on an inode which is already owned (has a non-zero refcount). */
void vfs_inode_get(struct inode *node)
{
	mutex_acquire(ic_lock);
	atomic_fetch_add(&node->count, 1);
	assert(node->count > 1);
	assert((node->flags & INODE_INUSE));
	mutex_release(ic_lock);
}
Example #30
void writer(void)  // Writes positive values; ends with a negative value.
{
    const int N = 20;                  // Number of data values to write.
    for( int n = 0; n <= N; ++n)
    {
        int d = n < N ? 10+n : -1;     // Prepare data or end marker.
        // When no readers are busy, lock the semaphore (count = -1):
        while( atomic_fetch_sub(&count,MAX_READERS+1) != MAX_READERS)
            atomic_fetch_add(&count, MAX_READERS+1);

        printf("Writer is writing %d\n", d),     // Critical section.
        data = d;
        atomic_fetch_add(&count, MAX_READERS+1); // Release the
                                                 // semaphores.
        thrd_sleep(&ms,NULL);          // Simulate data production.
    }
}
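A matching reader would take one of the MAX_READERS slots with atomic_fetch_sub and back off while the writer holds the semaphore. A minimal sketch, assuming the same count, data, and ms globals used by writer():

void reader(int id)  // Reads data values until the end marker arrives.
{
    int d = 0;
    do {
        // Take a reader slot; if the writer holds the lock (count <= 0),
        // return the slot and retry:
        while (atomic_fetch_sub(&count, 1) <= 0)
            atomic_fetch_add(&count, 1);
        d = data;                              // Critical section.
        atomic_fetch_add(&count, 1);           // Release the reader slot.
        printf("Reader %d read %d\n", id, d);
        thrd_sleep(&ms, NULL);                 // Simulate data processing.
    } while (d >= 0);
}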