void sys_event_fire(sys_event_t * event)
{
    sys_event_client_t *client, ** pclient, ** plast;

    plast = atomic_load(&event->plast_client, memory_order_acquire);
    pclient = &event->clients;
    while (pclient != plast)
    {
        do
        {
            client = atomic_load(pclient, memory_order_acquire);
            if (unlikely(client == NULL))
            {
                pthread_yield();
            }
        } while (unlikely(client == NULL));

        if (client->notify != NULL)
        {
            client->notify();
        }

        pclient = &client->next;
    }
}
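The spin on a NULL client above only works if registration claims the tail slot before the node pointer is stored. A minimal sketch of such a producer, assuming a hypothetical sys_event_add_client() and _Atomic field types matching what sys_event_fire() reads:

/* Hypothetical producer for the event above: append a client node. The tail
 * slot (plast_client) is claimed first, so sys_event_fire() can observe the
 * new list end before the node pointer is published -- that window is exactly
 * what its spin-on-NULL loop covers. */
void sys_event_add_client(sys_event_t *event, sys_event_client_t *client)
{
    sys_event_client_t **prev_slot;

    atomic_store(&client->next, NULL);

    /* claim the tail slot (publishes the new end of the list) */
    prev_slot = atomic_exchange_explicit(&event->plast_client, &client->next,
                                         memory_order_acq_rel);

    /* publish the node itself; readers spin until this store is visible */
    atomic_store_explicit(prev_slot, client, memory_order_release);
}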
Example #2
void Executer::doRun(){
    
    if(atomic_load(&m_state) != EXECUTER_STATE_STARTED){
        return;
    }
    
    for(;;){
        if(m_task_queue->isEmpty()){
            // finish shutting down once the queue has drained; otherwise a
            // shutdown requested while idle would never complete
            if(atomic_load(&m_state) == EXECUTER_STATE_SHUTTINGDOWN){
                atomic_store(&m_state, EXECUTER_STATE_SHUTDOWN);
                return;
            }
            // just sleep
            usleep(1000);
            continue;
        }
        
        
        Task t = m_task_queue->wait_to_take();
        t.run();
        
        if(atomic_load(&m_state) == EXECUTER_STATE_SHUTTINGDOWN){
            if(m_task_queue->isEmpty()){
                atomic_store(&m_state, EXECUTER_STATE_SHUTDOWN);
                return;
            }
        }
        
    }
}
Example #3
static void flush(filter_t *filter)
{
    filter_sys_t *sys = filter->p_sys;
    MMAL_BUFFER_HEADER_T *buffer;

    msg_Dbg(filter, "flush deinterlace filter");

    if (atomic_load(&sys->input_in_transit) ||
            atomic_load(&sys->output_in_transit)) {

        msg_Dbg(filter, "flush: flush ports (input: %d, output: %d in transit)",
                sys->input_in_transit, sys->output_in_transit);
        mmal_port_flush(sys->output);
        mmal_port_flush(sys->input);

        msg_Dbg(filter, "flush: wait for all buffers to be returned");
        vlc_mutex_lock(&sys->buffer_cond_mutex);
        while (atomic_load(&sys->input_in_transit) ||
                atomic_load(&sys->output_in_transit)) {
            vlc_cond_wait(&sys->buffer_cond, &sys->buffer_cond_mutex);
        }
        vlc_mutex_unlock(&sys->buffer_cond_mutex);
    }

    while ((buffer = mmal_queue_get(sys->filtered_pictures))) {
        picture_t *pic = (picture_t *)buffer->user_data;
        msg_Dbg(filter, "flush: release already filtered pic %p",
                (void *)pic);
        picture_Release(pic);
    }
    msg_Dbg(filter, "flush: done");
}
Example #4
QueueResult queue_pop(queue_p q)
{
    assert(q);
    assert(queue_has_front(q) == QUEUE_TRUE);

    /* get the head */
    node *popped = (node*) atomic_load(&q->head);
    node *compare = popped;

    /* set the tail and head to nothing if they are the same */
    if (atomic_compare_exchange_strong(&q->tail, &compare, 0)) {
        compare = popped;
        /* it is possible for another thread to have pushed after
         * we swap out the tail. In this case, the head will be different
         * than what was popped, so we just do a blind exchange regardless
         * of the result.
         */
        atomic_compare_exchange_strong(&q->head, &compare, 0);
    } else {
        /* tail is different from head, set the head to the next value */
        node *new_head = 0;
        while (!new_head) {
            /* it is possible that the next node has not been assigned yet,
             * so just spin until the pushing thread stores the value.
	     */
            new_head = (node *) atomic_load(&popped->next);
        }
        atomic_store(&q->head, new_head); /* the next node becomes the new head */
    }

    free(popped);
    return QUEUE_SUCCESS;
}
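The spin on popped->next (and the blind head exchange) assume a push that swings the tail before linking the new node. The matching queue_push is not shown here; a plausible sketch consistent with this pop, with the payload field and allocation checks assumed:

QueueResult queue_push(queue_p q, void *value)
{
    node *n = malloc(sizeof(*n));
    assert(n);

    n->value = value;              /* assumed payload field */
    atomic_store(&n->next, 0);

    /* publish the new tail first; a concurrent pop may see it before the
     * link below is written, which is why queue_pop spins on next */
    node *prev = (node *) atomic_exchange(&q->tail, n);
    if (prev)
        atomic_store(&prev->next, n);   /* link behind the old tail */
    else
        atomic_store(&q->head, n);      /* queue was empty */

    return QUEUE_SUCCESS;
}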
Example #5
static void fill_output_port(decoder_t *dec)
{
    decoder_sys_t *sys = dec->p_sys;

    unsigned max_buffers_in_transit = 0;
    int buffers_available = 0;
    int buffers_to_send = 0;
    int i;

    if (sys->output_pool) {
        max_buffers_in_transit = __MAX(sys->output_pool->headers_num,
                MIN_NUM_BUFFERS_IN_TRANSIT);
        buffers_available = mmal_queue_length(sys->output_pool->queue);
    } else {
        max_buffers_in_transit = __MAX(sys->output->buffer_num, MIN_NUM_BUFFERS_IN_TRANSIT);
        buffers_available = NUM_DECODER_BUFFER_HEADERS - atomic_load(&sys->output_in_transit) -
            mmal_queue_length(sys->decoded_pictures);
    }
    buffers_to_send = max_buffers_in_transit - atomic_load(&sys->output_in_transit);

    if (buffers_to_send > buffers_available)
        buffers_to_send = buffers_available;

#ifndef NDEBUG
    msg_Dbg(dec, "Send %d buffers to output port (available: %d, "
                    "in_transit: %d, decoded: %d, buffer_num: %d)",
                    buffers_to_send, buffers_available,
                    atomic_load(&sys->output_in_transit),
                    mmal_queue_length(sys->decoded_pictures),
                    sys->output->buffer_num);
#endif
    for (i = 0; i < buffers_to_send; ++i)
        if (send_output_buffer(dec) < 0)
            break;
}
Example #6
void picture_pool_NonEmpty(picture_pool_t *pool, bool reset)
{
    picture_t *old = NULL;

    for (int i = 0; i < pool->picture_count; i++) {
        if (pool->picture_reserved[i])
            continue;

        picture_t *picture = pool->picture[i];
        if (reset) {
            if (atomic_load(&picture->gc.refcount) > 0)
                Unlock(picture);
            atomic_store(&picture->gc.refcount, 0);
        } else if (atomic_load(&picture->gc.refcount) == 0) {
            return;
        } else if (!old || picture->gc.p_sys->tick < old->gc.p_sys->tick) {
            old = picture;
        }
    }
    if (!reset && old) {
        if (atomic_load(&old->gc.refcount) > 0)
            Unlock(old);
        atomic_store(&old->gc.refcount, 0);
    }
}
Example #7
TEST(stdatomic, init) {
  atomic_int v = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_load(&v));

  atomic_init(&v, 456);
  ASSERT_EQ(456, atomic_load(&v));

  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
}
Example #8
/*****************************************************************************
 * Render: displays previously rendered output
 *****************************************************************************
 * This function sends the currently rendered image to the adjust filter to
 * modify it, waits until it is displayed and switches the two rendering
 * buffers, preparing the next frame.
 *****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    picture_t *p_outpic;
    filter_sys_t *p_sys = p_filter->p_sys;
    int i_simthres = atomic_load( &p_sys->i_simthres );
    int i_satthres = atomic_load( &p_sys->i_satthres );
    int i_color = atomic_load( &p_sys->i_color );

    if( !p_pic ) return NULL;

    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* Copy the Y plane */
    plane_CopyPixels( &p_outpic->p[Y_PLANE], &p_pic->p[Y_PLANE] );

    /*
     * Do the U and V planes
     */
    int refu, refv, reflength;
    GetReference( &refu, &refv, &reflength, i_color );

    for( int y = 0; y < p_pic->p[U_PLANE].i_visible_lines; y++ )
    {
        uint8_t *p_src_u = &p_pic->p[U_PLANE].p_pixels[y * p_pic->p[U_PLANE].i_pitch];
        uint8_t *p_src_v = &p_pic->p[V_PLANE].p_pixels[y * p_pic->p[V_PLANE].i_pitch];
        uint8_t *p_dst_u = &p_outpic->p[U_PLANE].p_pixels[y * p_outpic->p[U_PLANE].i_pitch];
        uint8_t *p_dst_v = &p_outpic->p[V_PLANE].p_pixels[y * p_outpic->p[V_PLANE].i_pitch];

        for( int x = 0; x < p_pic->p[U_PLANE].i_visible_pitch; x++ )
        {
            if( IsSimilar( *p_src_u - 0x80, *p_src_v - 0x80,
                           refu, refv, reflength,
                           i_satthres, i_simthres ) )

            {
                *p_dst_u++ = *p_src_u;
                *p_dst_v++ = *p_src_v;
            }
            else
            {
                *p_dst_u++ = 0x80;
                *p_dst_v++ = 0x80;
            }
            p_src_u++;
            p_src_v++;
        }
    }

    return CopyInfoAndRelease( p_outpic, p_pic );
}
Example #9
void send_server_status(struct bufferevent *bev)
{
    struct packet_server_status data;

    data.hdr.proto = PROTO_EDONKEY;
    data.hdr.length = sizeof(data) - sizeof(data.hdr);
    data.opcode = OP_SERVERSTATUS;
    data.user_count = atomic_load(&g_srv.user_count);
    data.file_count = atomic_load(&g_srv.file_count);

    bufferevent_write(bev, &data, sizeof(data));
}
Example #10
#include <atomic>
#include <cassert>
#include <memory>

int main() {
    std::shared_ptr<int> a;

    std::shared_ptr<int> b;
    std::shared_ptr<int> c = std::make_shared<int>(10);

    // retry the weak CAS until it succeeds: a is empty and equal to b,
    // so this installs c into a
    while(!atomic_compare_exchange_weak(&a, &b, c))
      ;

    assert(atomic_load(&a) == c);
    assert(c.use_count() == 2); // owners: a and c
}
Example #11
void aeron_distinct_error_log_close(aeron_distinct_error_log_t *log)
{
    aeron_distinct_observation_t *observations = atomic_load(&log->observations_pimpl->observations);
    size_t num_observations = atomic_load(&log->observations_pimpl->num_observations);
    
    for (size_t i = 0; i < num_observations; i++)
    {
        aeron_free((void *)observations[i].description);
    }
    
    aeron_free(observations);
    aeron_free(log->observations_pimpl);
}
Example #12
static unsigned int
AcquireSegmentBufferSpace(
    _In_ SystemPipe_t*              Pipe,
    _In_ SystemPipeSegmentBuffer_t* Buffer,
    _In_ size_t                     Length)
{
    // Variables
    unsigned int ReadIndex;
    unsigned int WriteIndex;
    size_t BytesAvailable;

    // Make sure we write all the bytes
    while (1) {
        WriteIndex      = atomic_load(&Buffer->WritePointer);
        ReadIndex       = atomic_load(&Buffer->ReadCommitted);
        BytesAvailable  = MIN(
            CalculateBytesAvailableForWriting(Buffer, ReadIndex, WriteIndex), Length);
        if (BytesAvailable != Length) {
            SchedulerAtomicThreadSleep((atomic_int*)&Buffer->ReadCommitted, (int*)&ReadIndex, 0);
            continue; // Start over
        }

        // Synchronize with other producers
        if (Pipe->Configuration & PIPE_MULTIPLE_PRODUCERS) {
            while (BytesAvailable == Length) {
                size_t NewWritePointer  = WriteIndex + BytesAvailable;
                if (atomic_compare_exchange_weak(&Buffer->WritePointer, &WriteIndex, NewWritePointer)) {
                    break;
                }
                ReadIndex       = atomic_load(&Buffer->ReadCommitted);
                BytesAvailable  = MIN(
                    CalculateBytesAvailableForWriting(Buffer, ReadIndex, WriteIndex), Length);
            }

            // Did we end up overcommitting?
            if (BytesAvailable != Length) {
                continue; // Start write loop all over
            }
        }
        else {
            atomic_store_explicit(&Buffer->WritePointer, WriteIndex + BytesAvailable, memory_order_relaxed);
        }

        // Break us out here
        if (BytesAvailable == Length) {
            break;
        }
    }
    return WriteIndex;
}
Example #13
// get/wait for next item for consumer.
// Does not set any bmap bits. Item must remain in buffer
// until we are done with it.
qitem* queue_pop(queue *q)
{
	qitem *buf        = (qitem*)(q->buf + q->mapbytes*2);
	atomic_llong *map = (atomic_llong*)(q->buf + q->mapbytes);
	int sbit;
	int next;

	// printf("POP %lld, %lld %d\n",(long long int)q->buf, (long long int)map, q->mapbytes);

	while (1)
	{
		long long int mval = atomic_load(&map[q->last_map_pos]);

		if ((sbit = ffsll(mval & (~q->visited))))
		{
			// printf("SET BIT ! %d %d %lld\n",sbit,q->last_map_pos, mval);
			--sbit;
			q->visited |= (((long long int)1) << sbit);
			atomic_fetch_sub(&q->size, 1);
			return (qitem*)&buf[q->last_map_pos*64 + sbit];
		}

		if (q->last_map_pos == q->map_elements-1)
		{
			next = 0;
		}
		else
		{
			next = q->last_map_pos+1;
		}

		q->last_map_pos = next;
		q->visited = (long long int)0;
		mval = atomic_load(&map[next]);
		if ((sbit = ffsll(mval)))
		{
			--sbit;
			q->visited |= (((long long int)1) << sbit);
			atomic_fetch_sub(&q->size, 1);
			return (qitem*)&buf[next*64 + sbit];
		}
		else
		{
			q->last_map_pos = 0;
		}

		usleep(DELAY_BY);
	}
}
Example #14
/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    while (1) {
        if (atomic_load(&p->state) == STATE_INPUT_READY) {
            pthread_mutex_lock(&p->mutex);
            while (atomic_load(&p->state) == STATE_INPUT_READY) {
                if (p->die) {
                    pthread_mutex_unlock(&p->mutex);
                    goto die;
                }
                pthread_cond_wait(&p->input_cond, &p->mutex);
            }
            pthread_mutex_unlock(&p->mutex);
        }

        if (!codec->update_thread_context && avctx->thread_safe_callbacks)
            ff_thread_finish_setup(avctx);

        pthread_mutex_lock(&p->mutex);
        av_frame_unref(p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);

        if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
            if (avctx->internal->allocate_progress)
                av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
                       "free the frame on failure. This is a bug, please report it.\n");
            av_frame_unref(p->frame);
        }

        if (atomic_load(&p->state) == STATE_SETTING_UP)
            ff_thread_finish_setup(avctx);

        atomic_store(&p->state, STATE_INPUT_READY);

        pthread_mutex_lock(&p->progress_mutex);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);

        pthread_mutex_unlock(&p->mutex);
    }
die:

    return NULL;
}
Example #15
static void *Worker( void *arg ) {
    TYPE id = (size_t)arg;
	uint64_t entry;
#ifdef FAST
	unsigned int cnt = 0, oid = id;
#endif // FAST

    for ( int r = 0; r < RUNS; r += 1 ) {
        entry = 0;
        while ( atomic_load(&stop) == 0 ) {
            atomic_store(&states[id*PADRATIO], LOCKED);
            while (1) {
                int lturn = atomic_load(&turn);
                if (!validate_left(id, lturn)) {
                    atomic_store(&states[id*PADRATIO], WAITING);
                    while (1) {
                        if (validate_left(id, lturn) && lturn == atomic_load_explicit(&turn, memory_order_acquire)) break;
                        Pause();
                        lturn = atomic_load_explicit(&turn, memory_order_acquire);
                    }
                    atomic_store(&states[id*PADRATIO], LOCKED);
                    continue;
                }
                while (lturn == atomic_load_explicit(&turn, memory_order_acquire)) {
                    if (validate_right(id, lturn)) break;
                    Pause();
                }
                if (lturn == atomic_load_explicit(&turn, memory_order_acquire)) break;
            }
			CriticalSection( id );						// critical section
			int lturn = (atomic_load_explicit(&turn, memory_order_relaxed)+1) % N;
			atomic_store_explicit(&turn, lturn, memory_order_relaxed);
			atomic_store_explicit(&states[id*PADRATIO], UNLOCKED, memory_order_release); // exit protocol
#ifdef FAST
			id = startpoint( cnt );						// different starting point each experiment
			cnt = cycleUp( cnt, NoStartPoints );
#endif // FAST
			entry += 1;
		} // while
#ifdef FAST
		id = oid;
#endif // FAST
		entries[r][id] = entry;
        atomic_fetch_add( &Arrived, 1 );
        while ( atomic_load(&stop) != 0 ) Pause();
        atomic_fetch_add( &Arrived, -1 );
	} // for
	return NULL;
} // Worker
Example #16
void Executer::execute(Task& task){
    
    int oldState = atomic_load(&m_state);
    if(oldState >= EXECUTER_STATE_SHUTTINGDOWN){
        // start the thread again
        start();
    }
    
    if(atomic_load(&m_state) != EXECUTER_STATE_STARTED){
        cerr << "Can't start the Executor!" << endl;
        return;
    }
    
    addTask(task);
}
Example #17
void __thread_tsd_run_dtors(void) {
    thrd_t self = __thrd_current();
    int i, j, not_finished = self->tsd_used;
    for (j = 0; not_finished && j < TSS_DTOR_ITERATIONS; j++) {
        not_finished = 0;
        for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
            if (self->tsd[i] && atomic_load(&keys[i])) {
                void* tmp = self->tsd[i];
                self->tsd[i] = 0;
                atomic_load(&keys[i])(tmp);
                not_finished = 1;
            }
        }
    }
}
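The not_finished / TSS_DTOR_ITERATIONS loop above exists because a destructor may install a fresh value while it runs, which requires another pass. A small, self-contained illustration of that behaviour using the portable C11 <threads.h> API rather than this runtime's internals:

#include <stdio.h>
#include <threads.h>

static tss_t key;

/* First pass: re-install a value, forcing the runtime to run another
 * destructor iteration (bounded by TSS_DTOR_ITERATIONS). */
static void dtor(void *p)
{
    if (p == (void *)1) {
        puts("dtor pass 1: storing a new value");
        tss_set(key, (void *)2);
    } else {
        puts("dtor pass 2: nothing left to do");
    }
}

static int worker(void *arg)
{
    (void)arg;
    tss_set(key, (void *)1);
    return 0;                       /* thread exit triggers the dtor passes */
}

int main(void)
{
    thrd_t t;
    tss_create(&key, dtor);
    thrd_create(&t, worker, NULL);
    thrd_join(t, NULL);
    tss_delete(&key);
    return 0;
}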
Example #18
static void handle_reply(uip_ipaddr_t *source, uint8_t ttl, uint8_t *data,
                         uint16_t datalen)
{
    char addr_str[IPV6_ADDR_MAX_STR_LEN];
    icmpv6_echo_t *ping = (icmpv6_echo_t *)data;

    _waiting = false;

    ipv6_addr_to_str(addr_str, (ipv6_addr_t *)source, sizeof(addr_str));

    (void)atomic_fetch_add(&received, 1); /* Ignore return value, we only want to increment the counter */
    printf("%" PRIu16 " bytes from %s: icmp_seq=%" PRIu16 " ttl=%u quota=%i/%i\n",
           datalen, addr_str, byteorder_ntohs(ping->seq), (unsigned)ttl,
           atomic_load(&received), atomic_load(&num));
}
Example #19
/*****************************************************************************
 * RenderBlur: renders a blurred picture
 *****************************************************************************/
static void RenderBlur( filter_sys_t *p_sys, picture_t *p_newpic,
                        picture_t *p_outpic )
{
    const int i_oldfactor = atomic_load( &p_sys->i_factor );
    int i_newfactor = 128 - i_oldfactor;

    for( int i_plane = 0; i_plane < p_outpic->i_planes; i_plane++ )
    {
        uint8_t *p_old, *p_new, *p_out, *p_out_end, *p_out_line_end;
        const int i_visible_pitch = p_outpic->p[i_plane].i_visible_pitch;
        const int i_visible_lines = p_outpic->p[i_plane].i_visible_lines;

        p_out = p_outpic->p[i_plane].p_pixels;
        p_new = p_newpic->p[i_plane].p_pixels;
        p_old = p_sys->p_tmp->p[i_plane].p_pixels;
        p_out_end = p_out + p_outpic->p[i_plane].i_pitch * i_visible_lines;
        while ( p_out < p_out_end )
        {
            p_out_line_end = p_out + i_visible_pitch;

            while ( p_out < p_out_line_end )
            {
                *p_out++ = (((*p_old++) * i_oldfactor) +
                            ((*p_new++) * i_newfactor)) >> 7;
            }

            p_old += p_sys->p_tmp->p[i_plane].i_pitch - i_visible_pitch;
            p_new += p_newpic->p[i_plane].i_pitch     - i_visible_pitch;
            p_out += p_outpic->p[i_plane].i_pitch     - i_visible_pitch;
        }
    }
}
Example #20
int pthread_rwlock_wrlock (pthread_rwlock_t *rwp)
{
  int flags = rwp->__flags & GSYNC_SHARED;
  unsigned int self_id = PTHREAD_SELF->id;

  if (rwl_owned_p (rwp, self_id, flags))
    return (EDEADLK);

  while (1)
    {
      unsigned int owner = atomic_load (&rwl_oid(rwp->__oid_nrd));
      if (owner == RWLOCK_UNOWNED)
        {
          /* The lock is unowned. Try to take ownership. */
          if (atomic_cas_bool (&rwl_oid(rwp->__oid_nrd), owner, self_id))
            {
              rwl_setown (rwp, flags);
              return (0);
            }
        }
      else
        {
          /* Wait on the address. We are only interested in the
           * value of the OID field, but we need a different queue
           * for writers. As such, we use 64-bit values, with the
           * high limb being the owner id. */
          unsigned int *ptr = &rwl_qwr(rwp->__oid_nrd);
          unsigned int nw = atomic_add (ptr, +1);
          lll_xwait (ptr, nw + 1, owner, flags);
          atomic_add (ptr, -1);
        }
    }
}
Example #21
static bool region_close(struct region *region)
{
    optics_assert(slock_try_lock(&region->lock),
            "closing optics with active thread");

    struct region_vma *node = &region->vma;
    while (node) {
        void *vma_ptr = (void *) atomic_load(&node->ptr);
        size_t vma_len = node->len;

        if (munmap(vma_ptr, vma_len) == -1) {
            optics_fail_errno("unable to unmap region '%s': {%p, %lu}",
                    region->name, vma_ptr, vma_len);
            return false;
        }

        struct region_vma *next = node->next;
        if (node != &region->vma) free(node);
        node = next;
    }

    if (close(region->fd) == -1) {
        optics_fail_errno("unable to close region '%s'", region->name);
        return false;
    }

    if (region->owned) {
        if (shm_unlink(region->name) == -1) {
            optics_fail_errno("unable to unlink region '%s'", region->name);
            return false;
        }
    }

    return true;
}
Example #22
TEST(stdatomic, atomic_store) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_load(&i));
  atomic_store_explicit(&i, 123, memory_order_relaxed);
  ASSERT_EQ(123, atomic_load_explicit(&i, memory_order_relaxed));
}
Example #23
/***************************************************************************
** Get address of region(s) from which we can read data.
** If the region is contiguous, size2 will be zero.
** If non-contiguous, size2 will be the size of second region.
** Returns room available to be read or elementCount, whichever is smaller.
*/
ring_buffer_size_t PaUtil_GetRingBufferReadRegions( PaUtilRingBuffer *rbuf, ring_buffer_size_t elementCount,
        void **dataPtr1, ring_buffer_size_t *sizePtr1,
        void **dataPtr2, ring_buffer_size_t *sizePtr2 )
{
    ring_buffer_size_t   index;
    ring_buffer_size_t   available = PaUtil_GetRingBufferReadAvailable( rbuf ); /* doesn't use memory barrier */
    if( elementCount > available ) elementCount = available;
    /* Check to see if read is not contiguous. */
    if( elementCount )
    {
        index = atomic_load(&rbuf->readIndex) & rbuf->smallMask;
        if( (index + elementCount) > rbuf->bufferSize )
        {
            /* Read data from two blocks that wrap around the end of the buffer. */
            ring_buffer_size_t firstHalf = rbuf->bufferSize - index;
            *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes];
            *sizePtr1 = firstHalf;
            *dataPtr2 = &rbuf->buffer[0];
            *sizePtr2 = elementCount - firstHalf;
        }
        else
        {
            *dataPtr1 = &rbuf->buffer[index*rbuf->elementSizeBytes];
            *sizePtr1 = elementCount;
            *dataPtr2 = NULL;
            *sizePtr2 = 0;
        }
    }
    return elementCount;
}
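A consumer typically copies out of the one or two regions returned here and then publishes the read with PaUtil_AdvanceRingBufferReadIndex(); roughly (a sketch of the usual pattern, mirroring what PaUtil_ReadRingBuffer() does):

/* Sketch: drain up to `want` elements, handling the wrapped two-region case. */
static ring_buffer_size_t drain_ring_buffer( PaUtilRingBuffer *rbuf, void *dst,
                                             ring_buffer_size_t want )
{
    void *p1, *p2;
    ring_buffer_size_t n1, n2;
    ring_buffer_size_t got = PaUtil_GetRingBufferReadRegions( rbuf, want,
                                                              &p1, &n1, &p2, &n2 );
    if( got > 0 )
    {
        memcpy( dst, p1, (size_t)n1 * rbuf->elementSizeBytes );
        if( n2 > 0 ) /* the read wrapped past the end of the buffer */
            memcpy( (char *)dst + (size_t)n1 * rbuf->elementSizeBytes,
                    p2, (size_t)n2 * rbuf->elementSizeBytes );
    }
    /* make the consumed space visible to the writer */
    return PaUtil_AdvanceRingBufferReadIndex( rbuf, got );
}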
Example #24
static double get_device_delay(struct wasapi_state *state) {
    UINT64 sample_count = atomic_load(&state->sample_count);
    UINT64 position, qpc_position;
    HRESULT hr;

    switch (hr = IAudioClock_GetPosition(state->pAudioClock, &position, &qpc_position)) {
        case S_OK: case S_FALSE:
            break;
        default:
            MP_ERR(state, "IAudioClock::GetPosition returned %s\n", wasapi_explain_err(hr));
    }

    LARGE_INTEGER qpc_count;
    QueryPerformanceCounter(&qpc_count);
    double qpc_diff = (qpc_count.QuadPart * 1e7 / state->qpc_frequency.QuadPart) - qpc_position;

    position += state->clock_frequency * (uint64_t)(qpc_diff / 1e7);

    /* convert position to the same base as sample_count */
    position = position * state->format.Format.nSamplesPerSec / state->clock_frequency;

    double diff = sample_count - position;
    double delay = diff / state->format.Format.nSamplesPerSec;

    MP_TRACE(state, "device delay: %g samples (%g ms)\n", diff, delay * 1000);

    return delay;
}
Example #25
// Sets UsedBMap to signal consumer it can be used.
int queue_push(queue *q, qitem* item)
{
	qitem *buf        = (qitem*)(q->buf + q->mapbytes*2);
	atomic_llong *map = (atomic_llong*)(q->buf + q->mapbytes);
	const int diff    = (item - buf);
	const int i       = diff / 64;
	const int zbit    = diff % 64;

	while (1)
	{
		long long int mval = atomic_load(&map[i]);
		long long int nval;

		nval = mval | (((long long int)1) << zbit);

		if (atomic_compare_exchange_strong(&map[i], &mval, nval))
		{
			// printf("PUSHING ON POS i=%d zbit=%d diff=%d rdiff=%lld\n",i, zbit, diff, item-buf);
			break;
		}
		else
			atomic_fetch_add(&q->ct, 1);

		usleep(DELAY_BY);
	}
	return 1;
}
Example #26
static bool __confirm(void *data)
{
	struct mutex *m = data;
	if(!atomic_load(&m->lock))
		return false;
	return true;
}
Example #27
static void* __timer_thread_start(void* arg) {
  PosixTimer* timer = reinterpret_cast<PosixTimer*>(arg);

  kernel_sigset_t sigset;
  sigaddset(sigset.get(), TIMER_SIGNAL);

  while (true) {
    // Wait for a signal...
    siginfo_t si;
    memset(&si, 0, sizeof(si));
    int rc = __rt_sigtimedwait(sigset.get(), &si, NULL, sizeof(sigset));
    if (rc == -1) {
      continue;
    }

    if (si.si_code == SI_TIMER) {
      // This signal was sent because a timer fired, so call the callback.

      // All events to the callback will be ignored when the timer is deleted.
      if (atomic_load(&timer->deleted) == true) {
        continue;
      }
      timer->callback(timer->callback_argument);
    } else if (si.si_code == SI_TKILL) {
      // This signal was sent because someone wants us to exit.
      free(timer);
      return NULL;
    }
  }
}
Example #28
static void fetch_trigo( struct filter_sys_t *sys, int *i_sin, int *i_cos )
{
    uint32_t sincos = atomic_load( &sys->sincos );

    *i_sin = (int16_t)(sincos & 0xFFFF);
    *i_cos = (int16_t)(sincos >> 16);
}
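Packing both values into one 32-bit atomic is what lets the reader grab a consistent sin/cos pair without locking; the writer presumably packs them the same way, something like this sketch:

/* Hypothetical writer side: cos in the high 16 bits, sin in the low 16 bits,
 * so fetch_trigo() always observes a matching pair in a single atomic load. */
static void store_trigo( struct filter_sys_t *sys, int i_sin, int i_cos )
{
    uint32_t sincos = ( (uint32_t)(uint16_t)i_cos << 16 ) |
                        (uint32_t)(uint16_t)i_sin;
    atomic_store( &sys->sincos, sincos );
}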
Example #29
void *mqueue_writer_parpare(struct mqueue *q)
{
    struct item *ret = NULL;
    uint64_t oldf, newf;
    uint32_t freen, seq;
    for (;;) {
        rmb();

        oldf  = atomic_load(q->free);
        freen = oldf >> 32;
        seq   = (oldf & 0xFFFFFFFF) + 1;
        if (unlikely(freen == TAIL_IDX))
            return NULL;

        ret  = ITEM(q, freen);
        newf = ((uint64_t)ret->next << 32) | seq;

        if (compare_and_swap(&q->free, oldf, newf)) {
            ret->next = UNUSED_FLAG;
            return ret->content;
        }

        cpu_relax();
    }

    return NULL;
}
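The low 32 bits of q->free act as a sequence counter so a delayed CAS cannot re-install a stale head index (the ABA problem). A generic C11 sketch of the same tagging trick, independent of this queue's ITEM() and compare_and_swap() helpers:

#include <stdatomic.h>
#include <stdint.h>

#define FREELIST_EMPTY UINT32_MAX   /* assumed sentinel, like TAIL_IDX above */

/* Head of an index-based free list packed as (index << 32) | sequence.
 * Bumping the sequence on every pop means a CAS can only succeed against
 * the exact packed value that was originally read. */
static int tagged_freelist_pop(_Atomic uint64_t *free_head,
                               const uint32_t *next,   /* next[index] table */
                               uint32_t *out_idx)
{
    uint64_t oldf = atomic_load(free_head);
    for (;;) {
        uint32_t idx = (uint32_t)(oldf >> 32);
        uint32_t seq = (uint32_t)oldf + 1;     /* advance the ABA counter */
        if (idx == FREELIST_EMPTY)
            return 0;                          /* nothing free */

        uint64_t newf = ((uint64_t)next[idx] << 32) | seq;
        if (atomic_compare_exchange_weak(free_head, &oldf, newf)) {
            *out_idx = idx;                    /* idx now belongs to the caller */
            return 1;
        }
        /* CAS failed: oldf now holds the current value, retry with it */
    }
}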
Example #30
static int timer_stress_worker(void* void_arg) {
    timer_stress_args* args = reinterpret_cast<timer_stress_args*>(void_arg);
    while (!atomic_load(&args->timer_stress_done)) {
        timer_t t = TIMER_INITIAL_VALUE(t);
        zx_duration_t timer_duration = rand_duration(ZX_MSEC(5));

        // Set a timer, then switch to a different CPU to ensure we race with it.

        arch_disable_ints();
        uint timer_cpu = arch_curr_cpu_num();
        const Deadline deadline = Deadline::no_slack(current_time() + timer_duration);
        timer_set(&t, deadline, timer_stress_cb, void_arg);
        thread_set_cpu_affinity(get_current_thread(), ~cpu_num_to_mask(timer_cpu));
        DEBUG_ASSERT(arch_curr_cpu_num() != timer_cpu);
        arch_enable_ints();

        // We're now running on something other than timer_cpu.

        atomic_add_u64(&args->num_set, 1);

        // Sleep for the timer duration so that this thread's timer_cancel races with the timer
        // callback. We want to race to ensure there are no synchronization or memory visibility
        // issues.
        thread_sleep_relative(timer_duration);
        timer_cancel(&t);
    }
    return 0;
}