Example #1
bool push_back() {
   if(full())
     return false;
   back_ = next_index(back_);
   full_ = back_ == front_;
   return true;
 }
Example #2
    void pop()
    {
        buffer[pending_pop_read_index].~T();

        size_t next = next_index(pending_pop_read_index);
        read_index_.store(next, std::memory_order_release);
    }
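None of these excerpts includes next_index itself. For the single-producer/single-consumer ring buffer of Examples #1 and #2, it is typically a one-line advance-and-wrap helper; a minimal sketch, assuming a max_size member for the capacity:

size_t next_index(size_t index) const
{
    // Hedged sketch: step the ring-buffer index, wrapping to slot 0 when
    // the end of the backing storage is reached.
    size_t next = index + 1;
    if (next == max_size)
        next = 0;
    return next;
}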
Example #3
void app_sched_execute(void)
{
    while (!is_app_sched_paused() && !APP_SCHED_QUEUE_EMPTY())
    {
        // Since this function is only called from the main loop, there is no
        // need for a critical region here; however, special care must be taken
        // regarding the update of the queue start index (see the end of the loop).
        uint16_t event_index = m_queue_start_index;

        void * p_event_data;
        uint16_t event_data_size;
        app_sched_event_handler_t event_handler;

        p_event_data = &m_queue_event_data[event_index * m_queue_event_size];
        event_data_size = m_queue_event_headers[event_index].event_data_size;
        event_handler   = m_queue_event_headers[event_index].handler;

        event_handler(p_event_data, event_data_size);

        // Event processed, now it is safe to move the queue start index,
        // so the queue entry occupied by this event can be used to store
        // the next one.
        m_queue_start_index = next_index(m_queue_start_index);
    }
}
Example #4
/**@brief Function for reading the next event from specified event queue.
 *
 * @param[out]  pp_event_data       Pointer to pointer to event data.
 * @param[out]  p_event_data_size   Pointer to size of event data.
 * @param[out]  p_event_handler     Pointer to event handler function pointer.
 *
 * @return      NRF_SUCCESS if a new event was retrieved, NRF_ERROR_NOT_FOUND if the event queue is empty.
 */
static uint32_t app_sched_event_get(void                     ** pp_event_data,
                                    uint16_t *                  p_event_data_size,
                                    app_sched_event_handler_t * p_event_handler)
{
    uint32_t err_code = NRF_ERROR_NOT_FOUND;

    if (!APP_SCHED_QUEUE_EMPTY())
    {
        uint16_t event_index;

        // NOTE: There is no need for a critical region here, as this function will only be called
        //       from app_sched_execute() from inside the main loop, so it will never interrupt
        //       app_sched_event_put(). Also, updating of (i.e. writing to) the start index will be
        //       an atomic operation.
        event_index         = m_queue_start_index;
        m_queue_start_index = next_index(m_queue_start_index);

        *pp_event_data     = &m_queue_event_data[event_index * m_queue_event_size];
        *p_event_data_size = m_queue_event_headers[event_index].event_data_size;
        *p_event_handler   = m_queue_event_headers[event_index].handler;

        err_code = NRF_SUCCESS;
    }

    return err_code;
}
Example #5
 // Unlike the corresponding methods on list or deque, push_back may
 // fail: if full() is true, false is returned.
 bool push_back(const T& x) {
   if(full())
     return false;
   data_[back_] = x;
   back_        = next_index(back_);
   full_        = back_ == front_;
   return true;
 }
Example #6
uint32_t app_sched_event_put(void                    * p_event_data,
                             uint16_t                  event_data_size,
                             app_sched_event_handler_t handler)
{
    uint32_t err_code;

    if (event_data_size <= m_queue_event_size)
    {
        uint16_t event_index = 0xFFFF;

        CRITICAL_REGION_ENTER();

        if (!APP_SCHED_QUEUE_FULL())
        {
            event_index       = m_queue_end_index;
            m_queue_end_index = next_index(m_queue_end_index);

        #ifdef APP_SCHEDULER_WITH_PROFILER
            // This function call must be protected with a critical region
            // because it modifies 'm_max_queue_utilization'.
            queue_utilization_check();
        #endif
        }

        CRITICAL_REGION_EXIT();

        if (event_index != 0xFFFF)
        {
            // NOTE: This can be done outside the critical region since the event consumer will
            //       always be called from the main loop, and will thus never interrupt this code.
            m_queue_event_headers[event_index].handler = handler;
            if ((p_event_data != NULL) && (event_data_size > 0))
            {
                memcpy(&m_queue_event_data[event_index * m_queue_event_size],
                       p_event_data,
                       event_data_size);
                m_queue_event_headers[event_index].event_data_size = event_data_size;
            }
            else
            {
                m_queue_event_headers[event_index].event_data_size = 0;
            }

            err_code = NRF_SUCCESS;
        }
        else
        {
            err_code = NRF_ERROR_NO_MEM;
        }
    }
    else
    {
        err_code = NRF_ERROR_INVALID_LENGTH;
    }

    return err_code;
}
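The scheduler excerpts in Examples #3 through #6 come from Nordic's app_scheduler module, where next_index steps an index through the event queue. A plausible sketch, assuming a module-level m_queue_size holding the number of usable queue entries:

static uint16_t next_index(uint16_t index)
{
    // Hedged sketch: advance the queue index, wrapping after the last entry.
    return (index < m_queue_size) ? (index + 1) : 0;
}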
Example #7
GitgColor *
gitg_color_next()
{
	GitgColor *res = g_new(GitgColor, 1);
	res->ref_count = 1;
	res->index = next_index();

	return res;
}
Example #8
	size_t source::start_frame(const bool limit)
	{
		size_t previous_time = current_time_;
		size_t current_time = SDL_GetTicks();
		size_t frame_used = current_time - previous_time;
		if (limit && frame_time_ > frame_used)
		{
			// use SDL_Delay() to wait so we don't take all the CPU
			size_t wait_time = frame_time_ - frame_used;
			SDL_Delay(wait_time);
			current_time += wait_time;
		}

		// Advance the index only at the beginning of a frame
		index_ = next_index();
		time_[index_] = current_time;
		if (mode_ == REAL_TIME)
		{
			current_time_ = current_time;
		}
		else
		{
			// Check whether we already have enough frames
			// for a smooth time calculation
			if (time_[next_index()])
			{
				// the smoothed calculation uses rounding to
				// keep the time as near to real time as possible
				// @todo There should be error correction;
				//       maybe use values shifted by 3 bits
				size_t average = (current_time - time_[next_index()]) /
				                 (frames_to_remember - 1);

				current_time_ += average;
			}
			else
			{
				current_time_ = current_time;
			}
		}
		return frame_used;
	}
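In this frame timer, next_index() cycles through a fixed-size history of frame timestamps. A minimal sketch, assuming the index_ and frames_to_remember members used above:

	size_t source::next_index() const
	{
		// Hedged sketch: step to the next slot of the timestamp ring.
		return (index_ + 1) % frames_to_remember;
	}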
Example #9
/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the next_index() calls again.
 */
static int xor_tweak(struct skcipher_request *req, bool second_pass)
{
	const int bs = LRW_BLOCK_SIZE;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 t = rctx->t;
	struct skcipher_walk w;
	__be32 *iv;
	u32 counter[4];
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;

	iv = (__be32 *)w.iv;
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst++, &t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
		} while ((avail -= bs) >= bs);

		if (second_pass && w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}
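Here next_index(counter) does double duty: it increments the 128-bit block counter in counter[] and returns the position of the bit that flips from 0 to 1, which selects the precomputed tweak increment in ctx->mulinc[]. A sketch of that contract, not necessarily the upstream code (ffz() is the kernel's find-first-zero-bit helper):

static inline int next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0) {
			res += ffz(counter[i]); /* lowest zero bit flips on increment */
			counter[i]++;
			return res;
		}
		counter[i] = 0; /* word was all ones: carry into the next word */
		res += 32;
	}
	return 127; /* counter wrapped; unreachable before 2^128 blocks */
}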
Example #10
void index_boolean_array(const boolean_array_t* source,
                         const index_spec_t* source_spec,
                         boolean_array_t* dest)
{
    _index_t* idx_vec1;
    _index_t* idx_vec2;
    _index_t* idx_size;
    int j;
    int i;

    assert(base_array_ok(source));
    assert(base_array_ok(dest));
    assert(index_spec_ok(source_spec));
    assert(index_spec_fit_base_array(source_spec,source));
    for(i = 0, j = 0; i < source->ndims; ++i) {
        if((source_spec->index_type[i] == 'W')
            ||
            (source_spec->index_type[i] == 'A')) {
            ++j;
        }
    }
    assert(j == dest->ndims);

    idx_vec1 = size_alloc(source->ndims);  /*indices in the source array*/
    idx_vec2 = size_alloc(dest->ndims); /* indices in the destination array*/
    idx_size = size_alloc(source_spec->ndims);

    for(i = 0; i < source->ndims; ++i) {
        idx_vec1[i] = 0;
    }
    for(i = 0; i < source_spec->ndims; ++i) {
        if(source_spec->index[i]) {
            idx_size[i] = imax(source_spec->dim_size[i],1);
        } else {
            idx_size[i] = source->dim_size[i];
        }
    }

    do {
        for(i = 0, j = 0; i < source->ndims; ++i) {
            if((source_spec->index_type[i] == 'W')
                ||
                (source_spec->index_type[i] == 'A')) {
                idx_vec2[j] = idx_vec1[i];
                j++;
            }
        }

        boolean_set(dest, calc_base_index(dest->ndims, idx_vec2, dest),
                    boolean_get(*source,
                                calc_base_index_spec(source->ndims, idx_vec1,
                                                     source, source_spec)));

    } while(0 == next_index(source->ndims, idx_vec1, idx_size));
}
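In this array runtime (see also Examples #28 and #30), next_index is not a ring-buffer step but an odometer over a multi-dimensional index vector. A sketch of that contract, using the same return-0-to-continue convention the loops above rely on:

static int next_index(int ndims, _index_t *idx_vec, const _index_t *idx_size)
{
    /* Hedged sketch: advance idx_vec through the index space given by
     * idx_size, last dimension fastest. Returns 0 while more index tuples
     * remain and non-zero once every combination has been visited. */
    int d;
    for (d = ndims - 1; d >= 0; --d) {
        if (++idx_vec[d] < idx_size[d])
            return 0;   /* still inside this dimension */
        idx_vec[d] = 0; /* wrap it and carry into the next dimension */
    }
    return 1;           /* all dimensions wrapped: iteration finished */
}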
Example #11
bool InliningDatabase::lookup(LookupKey* outer, LookupKey* inner) {
  if (table_no == 0) return false;  // Skim the cream

  unsigned int index = index_for(outer, inner);
  if (!table[index].is_filled()) return false; 
  while (!table[index].equal(outer, inner)) {
    index = next_index(index);
    if (table[index].is_empty()) return false;
  }
  return true;
}
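For the open-addressed hash tables here and in Examples #17 and #19, next_index is the linear-probing step: try the adjacent slot, wrapping at the table size. A minimal sketch (the table_size name is assumed):

unsigned int next_index(unsigned int index)
{
  // Hedged sketch: linear probing -- move to the next slot, wrapping at
  // the end of the table.
  return (index + 1) % table_size;
}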
Example #12
/**
 * Retrieves a copy and removes the head of the queue
 *
 * @return The data stored at the front, or INT_MIN if the queue is uninitialized or empty.
 */
int queue_poll() {
    if (false == queue_initialized() || true == queue_is_empty())
        return INT_MIN;

    int data = q_buff[front_index];
    front_index = next_index(front_index);
    size--;
    if (true == queue_is_empty())
        front_index = tail_index = -1;
    return data;
}
Example #13
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;

	wqe = queue_head(qp->sq.queue);
	npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;

	qp->req.wqe_index	= consumer_index(qp->sq.queue);
	qp->req.psn		= qp->comp.psn;
	qp->req.opcode		= -1;

	for (wqe_index = consumer_index(qp->sq.queue);
		wqe_index != producer_index(qp->sq.queue);
		wqe_index = next_index(qp->sq.queue, wqe_index)) {
		wqe = addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
			     wqe->wr.wr.atomic.remote_addr :
			     (mask & WR_READ_OR_WRITE_MASK) ?
			     wqe->wr.wr.rdma.remote_addr :
			     0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK)
				retry_first_write_send(qp, wqe, mask, npsn);

			if (mask & WR_READ_MASK)
				wqe->iova += npsn * qp->mtu;
		}

		wqe->state = wqe_state_posted;
	}
}
Example #14
bool RingBuffer::get_element(uint8_t* out_buf){
  if(occupancy == 0){
    return false;
  }
  
  *out_buf = buf[tail];
  if(head != tail){
    tail = next_index(tail);
  }    
  
  occupancy--;
  return true;
}
Example #15
int cb_pop( READING* r )
{
    CB_t next_tail = next_index( cb_tail );

    if( next_tail == cb_head )
    {
        return 0;
    }

    *r = cb_data[cb_tail];
    cb_tail = next_tail;
    return 1;
}
Example #16
void cb_push( READING* r )
{
    CB_t next_head = next_index(cb_head);

    if( next_head == cb_tail )
    {
        Led0_toggle();
        return;
    }

    cb_data[cb_head] = *r;
    cb_head = next_head;
}
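Examples #15 and #16 share one circular buffer, so their next_index presumably wraps a CB_t index at the buffer length. A sketch, with the CB_SIZE constant assumed:

static CB_t next_index(CB_t i)
{
    /* Hedged sketch: step the index, wrapping at the assumed length of
     * cb_data[]. */
    return (CB_t)((i + 1) % CB_SIZE);
}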
Example #17
RScope* InliningDatabase::lookup_and_remove(LookupKey* outer, LookupKey* inner) {
  if (table_no == 0) return NULL;  // Skim the cream

  unsigned int index = index_for(outer, inner);
  if (!table[index].is_filled()) return NULL; 
  while (!table[index].equal(outer, inner)) {
    index = next_index(index);
    if (table[index].is_empty()) return NULL;
  }
  table[index].set_deleted();
  table_no--;
  return file_in(outer, inner);
}
Example #18
    bool consume_one(Functor & f)
    {
        const size_t write_index = write_index_.load(std::memory_order_acquire);
        const size_t read_index  = read_index_.load(std::memory_order_relaxed); // only written from pop thread
        if (empty(write_index, read_index))
            return false;

        f(buffer[read_index]);
        buffer[read_index].~T();

        size_t next = next_index(read_index);
        read_index_.store(next, std::memory_order_release);
        return true;
    }
Example #19
    template <class RecordType>
    void table<RecordType>::find_index(int key, bool& found, size_t& i) const
    // Library facilities used: cstdlib
    {
	size_t count; // Number of entries that have been examined

	count = 0;
	i = hash(key);
	while((count < CAPACITY) && (data[i].key != NEVER_USED) && (data[i].key != key))
	{
	    ++count;
	    i = next_index(i);
	}
	found = (data[i].key == key);
    }
Example #20
template <typename T>
bool    QueueStatic<T>::push( T d )
{
    if( num < _size )
    {
        data[tail]  =   d;
        tail        =   next_index( tail );
        return  true;
    }
    else
    {
        error_msg( "queue is full." );
        return  false;
    }
}
Example #21
bool RingBuffer::store_element(uint8_t data){
  if(occupancy == capacity){
    return false;
  }
  
  if(occupancy != 0){
    head = next_index(head);
  }
  
  buf[head] = data;
  
  occupancy++;
  return true;
}
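This RingBuffer (paired with Example #14's get_element) advances head and tail lazily, so a buffer holding a single element has head == tail. A hypothetical round trip, with the construction details assumed:

RingBuffer rb;       // assumed to start empty
rb.store_element(1); // head stays put: buffer was empty
rb.store_element(2); // head advances first, then the slot is written

uint8_t v;
rb.get_element(&v);  // v == 1; tail advances because head != tail
rb.get_element(&v);  // v == 2; tail stays: this was the last element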
Example #22
static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_pkt_info *pkt, int payload)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}
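Both rxe excerpts (this one and Example #13) step a send-queue index with next_index(queue, index). Queue lengths in this driver are powers of two, so a plausible sketch is a masked increment (the index_mask field placement is assumed):

static inline unsigned int next_index(struct rxe_queue *q, int index)
{
	/* Hedged sketch: advance and wrap via the power-of-two mask
	 * (index_mask == queue length - 1). */
	return (index + 1) & q->index_mask;
}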
Example #23
// returns a valarray of indices represented by the generalized slice
static std::valarray<std::size_t>
get_index_array (const std::gslice &gsl)
{
    const std::size_t size = get_array_size (gsl);

    std::valarray<std::size_t> indices (size);

    std::valarray<std::size_t> tmpstore;

    for (std::size_t i = 0; i != size; ++i)
        indices [i] = next_index (gsl, tmpstore);

    return indices;
}
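next_index(gsl, tmpstore) yields successive flat indices of the std::gslice, with tmpstore carrying the current multi-index between calls (empty on the first call). A self-contained sketch of that contract, assuming <valarray> is included:

static std::size_t
next_index (const std::gslice &gsl, std::valarray<std::size_t> &idx)
{
    const std::valarray<std::size_t> sizes   = gsl.size ();
    const std::valarray<std::size_t> strides = gsl.stride ();

    if (0 == idx.size ())
        idx.resize (sizes.size ());   // first call: all-zero multi-index
    else {
        // odometer step: the last dimension varies fastest
        for (std::size_t d = idx.size (); d-- > 0; ) {
            if (++idx [d] < sizes [d])
                break;
            idx [d] = 0;              // wrap and carry outward
        }
    }

    // flat index = start + sum over dimensions of index * stride
    std::size_t flat = gsl.start ();
    for (std::size_t d = 0; d != idx.size (); ++d)
        flat += idx [d] * strides [d];

    return flat;
}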
Example #24
    bool push(T const & t, T * buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_relaxed);  // only written from push thread
        const size_t next = next_index(write_index, max_size);

        if (next == read_index_.load(memory_order_acquire))
            return false; /* ringbuffer is full */

        new (buffer + write_index) T(t); // copy-construct

        write_index_.store(next, memory_order_release);

        return true;
    }
Example #25
    bool consume_one(Functor const & functor, T * buffer, size_t max_size)
    {
        const size_t write_index = write_index_.load(memory_order_acquire);
        const size_t read_index  = read_index_.load(memory_order_relaxed); // only written from pop thread
        if ( empty(write_index, read_index) )
            return false;

        T & object_to_consume = buffer[read_index];
        functor( object_to_consume );
        object_to_consume.~T();

        size_t next = next_index(read_index, max_size);
        read_index_.store(next, memory_order_release);
        return true;
    }
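Examples #24 and #25 pass the capacity in as max_size, so their next_index is plausibly a free helper that wraps by comparison rather than modulo:

static size_t next_index(size_t arg, size_t max_size)
{
    // Hedged sketch: compare-and-subtract wrap; cheaper than % in the
    // common non-wrapping case.
    size_t ret = arg + 1;
    if (ret >= max_size)
        ret -= max_size;
    return ret;
}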
Example #26
int main()
{
    chr::vanilla_chronicle_settings settings("/tmp/example");
    chr::vanilla_chronicle chronicle(settings);

    auto tailer = chronicle.create_tailer();
    while(true)
    {
        if(!tailer.next_index())
            continue;
        std::cout << "int:  " << tailer.read<int>() << '\n';
        std::cout << "text: " << tailer.read(chr::readers::chars()) << '\n';
    }

}
Example #27
template <typename T>
T   QueueStatic<T>::pop()
{
    if( num <= 0 )
    {
        error_msg( "queue is empty." );
        return  0;
    }
    
    T   tmp;
    
    tmp     =   data[head];
    head    =   next_index( head );
    
    return  tmp;
}
Example #28
void indexed_assign_string_array(const string_array_t * source,
                                 string_array_t* dest,
                                 const index_spec_t* dest_spec)
{
    _index_t* idx_vec1;
    _index_t* idx_vec2;
    _index_t* idx_size;
    int i,j;

    assert(base_array_ok(source));
    assert(base_array_ok(dest));
    assert(index_spec_ok(dest_spec));
    assert(index_spec_fit_base_array(dest_spec, dest));
    for(i = 0,j = 0; i < dest_spec->ndims; ++i) {
        if(dest_spec->dim_size[i] != 0) {
            ++j;
        }
    }
    assert(j == source->ndims);

    idx_vec1 = size_alloc(dest->ndims);
    idx_vec2 = size_alloc(source->ndims);
    idx_size = size_alloc(dest_spec->ndims);

    for(i = 0; i < dest_spec->ndims; ++i) {
        idx_vec1[i] = 0;

        if(dest_spec->index[i] != NULL) {
            idx_size[i] = imax(dest_spec->dim_size[i],1);
        } else {
            idx_size[i] = dest->dim_size[i];
        }
    }

    do {
        for(i = 0, j = 0; i < dest_spec->ndims; ++i) {
            if(dest_spec->dim_size[i] != 0) {
                idx_vec2[j] = idx_vec1[i];
                ++j;
            }
        }
        string_set(dest, calc_base_index_spec(dest->ndims, idx_vec1,
                                              dest, dest_spec),
                   string_get(*source, calc_base_index(source->ndims,
                                                      idx_vec2, source)));

    } while(0 == next_index(dest_spec->ndims, idx_vec1, idx_size));
}
Example #29
/**
 * Add a new element at the tail of the queue.
 */
bool queue_offer(const int data) {
    if (false == queue_initialized())
        return false;

    if (true == is_full()) {
        // grow the buffer
        grow();
    }

    tail_index = next_index(tail_index);
    // enqueue the data
    q_buff[tail_index] = data;
    // set new front, tail
    if (front_index == -1) front_index = 0;
    size++;
    return true;
}
Example #30
void indexed_assign_boolean_array(const boolean_array_t source, boolean_array_t* dest,
                                  const index_spec_t* dest_spec)
{
    _index_t *idx_vec1, *idx_size;
    int j;
    indexed_assign_base_array_size_alloc(&source, dest, dest_spec, &idx_vec1, &idx_size);

    j = 0;
    do {
        boolean_set(dest,
                 calc_base_index_spec(dest->ndims, idx_vec1, dest, dest_spec),
                 boolean_get(source, j));
        j++;

    } while(0 == next_index(dest_spec->ndims, idx_vec1, idx_size));

    omc_assert_macro(j == base_array_nr_of_elements(source));
}