int queue_enq(struct Queue *q, void *item) {
    struct QueueEntry *qi;

    AZ(pthread_mutex_lock(&q->mutex));
    if (queue_full(q)) {
        q->enq_waiters++;
        while (queue_full(q))
            AZ(pthread_cond_wait(&q->enq_wait_cv, &q->mutex));
        q->enq_waiters--;
    }
    if (!STAILQ_EMPTY(&q->pool)) {
        qi = STAILQ_FIRST(&q->pool);
        STAILQ_REMOVE_HEAD(&q->pool, entries);
        q->pool_length--;
    } else {
        if (!(qi = (struct QueueEntry *)malloc(sizeof(struct QueueEntry))))
            abort(); // could return 0/-1, but meh.
    }
    qi->item = item;
    STAILQ_INSERT_TAIL(&q->queue, qi, entries);
    q->length++;
    AZ(pthread_cond_signal(&q->cv));
    AZ(pthread_mutex_unlock(&q->mutex));
    return 1;
}
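A minimal sketch of the helpers this snippet leans on, assuming a Varnish-style assert-zero macro and a hypothetical max_length capacity field on struct Queue (both are assumptions, not the original project's definitions):

#include <assert.h>

/* Assumed helper: assert that a pthreads call returned zero. */
#define AZ(x) assert((x) == 0)

/* Assumed predicate: full when length reaches a hypothetical
 * max_length capacity field on struct Queue. */
static int queue_full(const struct Queue *q) {
    return q->length >= q->max_length;
}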
void queue_test() {
    int i;
    int item;
    int length = 128;
    Queue *queue = make_queue(length);

    assert(queue_empty(queue));
    /* A ring buffer of capacity N holds N-1 items, so length-1 pushes fill it. */
    for (i = 0; i < length - 1; i++) {
        queue_push(queue, i);
    }
    assert(queue_full(queue));
    for (i = 0; i < 10; i++) {
        item = queue_pop(queue);
        assert(i == item);
    }
    assert(!queue_empty(queue));
    assert(!queue_full(queue));
    for (i = 0; i < 10; i++) {
        queue_push(queue, i);
    }
    assert(queue_full(queue));
    for (i = 0; i < 10; i++) {
        item = queue_pop(queue);
    }
    /* The last ten pops return the original items 10..19. */
    assert(item == 19);
}
/**
 * Push new data onto the queue. Blocks if the queue is full. Once
 * the push operation has completed, it signals other threads waiting
 * in queue_pop() that they may continue consuming sockets.
 */
int queue_push(queue_t *queue, void *data)
{
    int rv;

    if (queue->terminated) {
        return QUEUE_EOF; /* no more elements ever again */
    }

    rv = thread_mutex_lock(queue->one_big_mutex);
    if (rv != 0) {
        return rv;
    }

    if (queue_full(queue)) {
        if (!queue->terminated) {
            queue->full_waiters++;
            rv = thread_cond_wait(queue->not_full, queue->one_big_mutex);
            queue->full_waiters--;
            if (rv != 0) {
                thread_mutex_unlock(queue->one_big_mutex);
                return rv;
            }
        }
        /* If we wake up and it's still full, then we were interrupted */
        if (queue_full(queue)) {
            rv = thread_mutex_unlock(queue->one_big_mutex);
            if (rv != 0) {
                return rv;
            }
            if (queue->terminated) {
                return QUEUE_EOF; /* no more elements ever again */
            } else {
                return QUEUE_EINTR;
            }
        }
    }

    queue->data[queue->in] = data;
    queue->in = (queue->in + 1) % queue->bounds;
    queue->nelts++;

    if (queue->empty_waiters) {
        rv = thread_cond_signal(queue->not_empty);
        if (rv != 0) {
            thread_mutex_unlock(queue->one_big_mutex);
            return rv;
        }
    }

    rv = thread_mutex_unlock(queue->one_big_mutex);
    return rv;
}
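A hedged usage sketch for the blocking push above; the retry policy and the producer_loop name are illustrative assumptions, not part of the original API:

/* Hypothetical producer loop: keep pushing work until the queue is
 * terminated. QUEUE_EINTR means the wait was interrupted; retry. */
void producer_loop(queue_t *queue, void *work_item) {
    for (;;) {
        int rv = queue_push(queue, work_item);
        if (rv == 0)
            return;     /* pushed successfully */
        if (rv == QUEUE_EINTR)
            continue;   /* spurious wakeup or shutdown race: retry */
        break;          /* QUEUE_EOF or a lock error: give up */
    }
}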
int putchar(int c) {
    /* Busy-wait until the TX ring has room, then enqueue the byte. */
    while (queue_full(&txbuf))
        ;
    enqueue(&txbuf, c);
    return c;
}
void enqueue(TARGET *t) {
    // don't call this function when the queue is full, but just in case,
    // wait for a move to complete and free up the space for the passed target
    while (queue_full())
        delay(WAITING_DELAY);

    uint8_t h = mb_head + 1;
    h &= (MOVEBUFFER_SIZE - 1);

    if (t != NULL) {
        dda_create(&movebuffer[h], t);
    }
    else {
        // it's a wait for temp
        movebuffer[h].waitfor_temp = 1;
        movebuffer[h].nullmove = 0;
        // set "step" timeout to maximum
        movebuffer[h].c = 0xFFFFFF00;
    }

    mb_head = h;

    // fire up in case we're not running yet
    if (isHwTimerEnabled(0) == 0)
        next_move();
}
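For context, a plausible queue_full() for this move buffer, assuming the mb_head/mb_tail indices used above and a power-of-two MOVEBUFFER_SIZE; a sketch, not necessarily the firmware's exact definition:

// Assumed predicate: the ring is full when advancing mb_head would
// collide with mb_tail. MOVEBUFFER_SIZE must be a power of two.
uint8_t queue_full(void) {
    return ((mb_head + 1) & (MOVEBUFFER_SIZE - 1)) == mb_tail;
}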
void enqueue(int *queues, int element) {
    if (is_full()) {
        // queue_full() rebuilds the backing array when it runs out of
        // space and returns the replacement (see the tests below).
        queues = queue_full(queues, rear, front);
    }
    // Advance rear with wrap-around before writing, so the index
    // never steps past max_size - 1.
    rear = (rear + 1) % max_size;
    queues[rear] = element;
}
/**
 * Push new data onto the queue. If the queue is full, return QUEUE_EAGAIN. If
 * the push operation completes successfully, it signals other threads
 * waiting in queue_pop() that they may continue consuming sockets.
 */
int queue_trypush(queue_t *queue, void *data)
{
    int rv;

    if (queue->terminated) {
        return QUEUE_EOF; /* no more elements ever again */
    }

    rv = thread_mutex_lock(queue->one_big_mutex);
    if (rv != 0) {
        return rv;
    }

    if (queue_full(queue)) {
        rv = thread_mutex_unlock(queue->one_big_mutex);
        return QUEUE_EAGAIN;
    }

    queue->data[queue->in] = data;
    queue->in = (queue->in + 1) % queue->bounds;
    queue->nelts++;

    if (queue->empty_waiters) {
        rv = thread_cond_signal(queue->not_empty);
        if (rv != 0) {
            thread_mutex_unlock(queue->one_big_mutex);
            return rv;
        }
    }

    rv = thread_mutex_unlock(queue->one_big_mutex);
    return rv;
}
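A hedged usage sketch contrasting this with the blocking queue_push(): the caller handles QUEUE_EAGAIN itself instead of sleeping inside the queue. The offer_connection name and the retry policy are assumptions:

/* Hypothetical caller: try to hand off a connection without blocking;
 * on QUEUE_EAGAIN, the caller decides whether to drop, buffer locally,
 * or retry after a delay. */
int offer_connection(queue_t *queue, void *conn) {
    int rv = queue_trypush(queue, conn);
    if (rv == QUEUE_EAGAIN)
        return 0;                 /* queue is full right now */
    return (rv == 0) ? 1 : -1;    /* pushed, or hard error/EOF */
}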
// Lock will be held when this returns.
// Must be released with a call to queue_push/release()
// as soon as possible. Make _sure_ not to sleep in between!
// (Storing irq flags like this is supposedly incompatible
// with Sparc CPUs. But that only applies to Irq_lock queues.)
int queue_back(Queue *queue)
{
    int back;
    unsigned long flags = 0;

    QUEUE_DEBUG_RET(0);
    QUEUE_DEBUG_LOCK_RET(0);

    LOCKQ(queue, flags);
    back = queue->head;

    // Is there actually any space in the queue?
#ifndef ATOMIC_LENGTH
    // (Holding lock, so can't use queue_full.)
    {
        int length = back - queue->tail;
        if (length < 0)
            length += queue->size;
        if (length >= queue->size - 1) {
            back = -1;
        }
    }
#else
    if (queue_full(queue)) {
        back = -1;
    }
#endif

    queue->flags = flags;
    return back;
}
static void update_frame_v1(struct accuraterip_v1 *v1,
                            unsigned total_pcm_frames,
                            unsigned start_offset,
                            unsigned end_offset,
                            unsigned value)
{
    /* calculate initial checksum */
    if ((v1->index >= start_offset) && (v1->index <= end_offset)) {
        v1->checksums[0] += (value * v1->index);
        v1->values_sum += value;
    }

    /* store the first (pcm_frame_range - 1) values in initial_values */
    if ((v1->index >= start_offset) && (!queue_full(v1->initial_values))) {
        queue_push(v1->initial_values, value);
    }

    /* store the trailing (pcm_frame_range - 1) values in final_values */
    if ((v1->index > end_offset) && (!queue_full(v1->final_values))) {
        queue_push(v1->final_values, value);
    }

    /* calculate incremental checksums */
    if (v1->index > total_pcm_frames) {
        const uint32_t initial_value = queue_pop(v1->initial_values);
        const uint32_t final_value = queue_pop(v1->final_values);
        const uint32_t initial_value_product =
            (uint32_t)(start_offset - 1) * initial_value;
        const uint32_t final_value_product =
            (uint32_t)end_offset * final_value;

        v1->checksums[v1->index - total_pcm_frames] =
            v1->checksums[v1->index - total_pcm_frames - 1] +
            final_value_product -
            v1->values_sum -
            initial_value_product;

        v1->values_sum -= initial_value;
        v1->values_sum += final_value;
    }

    v1->index++;
}
bool enqueue(queue *q, void *value) {
    if (queue_full(q)) {
        return false;
    }
    q->ele[q->tail] = value;
    q->tail = (q->tail + 1) % q->size;
    return true;
}
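This snippet leans on companion predicates it doesn't show; a minimal sketch under the usual one-slot-reserved ring-buffer convention (the ele/tail/size fields follow the snippet, the head field and the convention itself are assumptions):

#include <stdbool.h>

/* Assumed: head is the next slot to read, tail the next slot to write.
 * Reserving one slot distinguishes "full" from "empty". */
bool queue_full(const queue *q) {
    return (q->tail + 1) % q->size == q->head;
}

bool queue_empty(const queue *q) {
    return q->tail == q->head;
}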
int enqueue(queue_t *buf, int data) {
    if (queue_full(buf)) {
        return 0;
    }
    else {
        buf->buffer[buf->head] = data;
        buf->head = ((buf->head + 1) == QUEUE_SIZE) ? 0 : buf->head + 1;
    }
    return 1;
}
/* enqueue */
void addq(int *rear, element item) {
    if (*rear == MAX_QUEUE_SIZE - 1) { /* is the queue full? */
        queue_full();
        return;
    }
    queue[++*rear] = item;
}
int enqueue(queue_t *buf, int data) {
    if (queue_full(buf))
        return 0;
    else {
        buf->buffer[buf->tail] = data;
        buf->tail = ((buf->tail + 1) == QUEUE_SIZE) ? 0 : buf->tail + 1;
    }
    return 1;
}
/*************************************
 * Textbook p.59: enqueue for a circular queue
 **************************************/
int queue_append(QueuePtr q, EntryType item) {
    int status = 1;
    if (!queue_full(q)) { // if the queue is not full
        q->entry[q->rear] = item;
        q->rear = (q->rear + 1) % MAXQUEUE;
        status = 0;
    }
    return status;
}
void addq(int front, int *rear, element item) {
    *rear = (*rear + 1) % MAX_QUEUE_SIZE;
    if (front == *rear) {
        queue_full();
        return;
    }
    queue[*rear] = item;
}
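These textbook-style addq() variants call a queue_full() that reports the overflow rather than testing for it; a plausible sketch of that handler (the message and exit behavior are assumptions):

#include <stdio.h>
#include <stdlib.h>

/* Assumed error handler: the classic textbook version simply
 * reports the overflow and terminates the program. */
void queue_full(void) {
    fprintf(stderr, "Queue is full, cannot add an element\n");
    exit(EXIT_FAILURE);
}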
// We enqueue by first making sure the queue isn't already full.
// If it's not full, we add the item at the tail, then increment the tail pointer.
int enqueue(queue_t *cb, int c) {
    // check for a buffer overrun
    if (queue_full(cb)) {
        return 0; // can't write
    }
    else {
        cb->buffer[cb->tail] = c;
        cb->tail = (cb->tail + 1) % QUEUE_SIZE;
    }
    return 1; // write succeeded
}
/* negative return value indicates a failure */
int enqueue_single(struct queue_stub *stub, struct queue_element *ele) {
    if (queue_full(stub))
        return -1;
    stub->records[stub->rear_ptr].rte_pkt_offset = ele->rte_pkt_offset;
    stub->records[stub->rear_ptr].rte_data_offset = ele->rte_data_offset;
    /* This write barrier is essential: we cannot otherwise guarantee the
     * order in which the write operations are issued. */
    WRITE_MEM_WB();
    stub->rear_ptr = (stub->rear_ptr + 1) % (stub->ele_num + 1);
    return 0;
}
static OSKIT_COMDECL
asyncio_poll(oskit_asyncio_t *f)
{
    struct char_queue_stream *s = (void *) (f - 1);

    if (queue_empty(s))
        return OSKIT_ASYNCIO_WRITABLE;
    if (queue_full(s))
        return OSKIT_ASYNCIO_READABLE;
    return OSKIT_ASYNCIO_READABLE | OSKIT_ASYNCIO_WRITABLE;
}
void test_queue_full_normal(void) {
    int *original_queues = initialize(3);
    original_queues[0] = 1;
    original_queues[2] = 3;

    int *target_queues = queue_full(original_queues, 0, 1);

    TEST_ASSERT_EQUAL(5, getFront());
    TEST_ASSERT_EQUAL(1, getRear());
    TEST_ASSERT_EQUAL(3, target_queues[0]);
    TEST_ASSERT_EQUAL(1, target_queues[1]);
}
int __libnet_internal__serial_send(struct port *port, const unsigned char *buf, int size)
{
    const unsigned char *p = buf;

    /* Copy as many bytes as fit into the send queue, then make sure the
     * transmitter-empty interrupt is enabled so they get drained. */
    while (!queue_full(port->send) && (size--))
        queue_put(port->send, *p++);
    enable_thre_int(port->baseaddr);
    return p - buf;
}
void test_queue_front_max(void) {
    int *original_queues = initialize(3);
    original_queues[0] = 1;
    original_queues[1] = 2;

    int *target_queues = queue_full(original_queues, 1, 2);

    TEST_ASSERT_EQUAL(5, getFront());
    TEST_ASSERT_EQUAL(1, getRear());
    TEST_ASSERT_EQUAL(1, target_queues[0]);
    TEST_ASSERT_EQUAL(2, target_queues[1]);
}
struct packet_t *queue_push(struct queue_t *queue) {
    if (queue_full(queue) == TRUE) {
        exception("Queue push: ", QUEUE_FULL);
        return NULL;
    }
    int tmp = queue->next;
    queue->next = (queue->next + 1) % BUFF_SIZE;
    queue->size++;
    return &queue->buffer[tmp];
}
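This push reserves a slot and returns a pointer for the caller to fill in place; a hypothetical usage sketch (the send_packet name and the data/len fields are assumptions, not the original struct):

#include <stddef.h>
#include <string.h>

/* Hypothetical caller: reserve a slot, then fill it in place. */
void send_packet(struct queue_t *queue, const void *data, size_t len) {
    struct packet_t *slot = queue_push(queue);
    if (slot == NULL)
        return; /* queue full: exception() already reported it */
    memcpy(slot->data, data, len);
    slot->len = len;
}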
void test_queue_front_zero(void) {
    int *original_queues = initialize(3);
    original_queues[1] = 2;
    original_queues[2] = 3;

    int *target_queues = queue_full(original_queues, 2, 0);

    TEST_ASSERT_EQUAL(5, getFront());
    TEST_ASSERT_EQUAL(1, getRear());
    TEST_ASSERT_EQUAL(2, target_queues[0]);
    TEST_ASSERT_EQUAL(3, target_queues[1]);
}
static int queue_add(queue *q, int c) {
    if (!queue_full(q)) {
        q->buf[q->head] = c;
        q->head = queue_next(q->head);
        return 1;
    }
    return 0;
}
void enqueue_home(TARGET *t, uint8_t endstop_check, uint8_t endstop_stop_cond) {
    // don't call this function when the queue is full, but just in case,
    // wait for a move to complete and free up the space for the passed target
    while (queue_full())
        delay(WAITING_DELAY);

    uint8_t h = mb_head + 1;
    h &= (MOVEBUFFER_SIZE - 1);

    DDA *new_movebuffer = &(movebuffer[h]);
    if (t != NULL) {
        dda_create(new_movebuffer, t);
        new_movebuffer->endstop_check = endstop_check;
        new_movebuffer->endstop_stop_cond = endstop_stop_cond;
    }
    else {
        // it's a wait for temp
        new_movebuffer->waitfor_temp = 1;
        new_movebuffer->nullmove = 0;
    }

    // make certain all writes to global memory
    // are flushed before modifying mb_head.
    MEMORY_BARRIER();
    mb_head = h;

    uint8_t save_reg = SREG;
    cli();
    CLI_SEI_BUG_MEMORY_BARRIER();
    uint8_t isdead = (movebuffer[mb_tail].live == 0);
    MEMORY_BARRIER();
    SREG = save_reg;

    if (isdead) {
        timer1_compa_deferred_enable = 0;
        next_move();
        if (timer1_compa_deferred_enable) {
            uint8_t save_reg = SREG;
            cli();
            CLI_SEI_BUG_MEMORY_BARRIER();
            TIMSK1 |= MASK(OCIE1A);
            MEMORY_BARRIER();
            SREG = save_reg;
        }
    }
}
#ifdef SIMULATOR
int main (int argc, char** argv) {
    sim_start(argc, argv);
#else
int main (void) {
#endif
    init();

    // main loop
    for (;;) {
        // if queue is full, no point in reading chars; the host will just have to wait
        if (queue_full() == 0) {
            if (serial_rxchars() != 0) {
                uint8_t c = serial_popchar();
                gcode_parse_char(c);
            }

#ifdef CANNED_CYCLE
            /**
              WARNING!

              This code works on a per-character basis. Any data received
              over serial WILL be randomly distributed through the canned
              gcode, and you'll have a big mess!

              The solution is to either store gcode parser state with each
              source, or only parse a line at a time. This will take extra
              RAM, and may be out of scope for the Teacup project.

              If print-from-SD-card is ever implemented, these changes may
              become necessary.
            */
            static uint32_t canned_gcode_pos = 0;

            gcode_parse_char(pgm_read_byte(&(canned_gcode_P[canned_gcode_pos])));

            canned_gcode_pos++;
            if (pgm_read_byte(&(canned_gcode_P[canned_gcode_pos])) == 0)
                canned_gcode_pos = 0;
#endif /* CANNED_CYCLE */
        }

        clock();
    }
}
/** Enumerates all entries in queue in order (from first to last item).
 * Returns false immediately iff proc returns false.
 * Returns true if every call to proc returned true. */
bool queue_enum(queue_enum_proc proc, void *data)
{
#if NABTO_APPREQ_QUEUE_SIZE > 1
    queue_entry *entry;

    if (queueLastAdd) {
        // At least one record in queue.
        // queue may be full. queue_first_used == queue_next_free means full.
        UNABTO_ASSERT(!queue_empty());
        UNABTO_ASSERT(queue_first_used->state != APPREQ_FREE);
        entry = queue_first_used;
        do {
            // If entry->state == APPREQ_FREE, we have an empty (already answered)
            // slot in the queue. Don't use it (requests are inserted in strict
            // order to start treatment in the same order).
            if (entry->state != APPREQ_FREE) {
                // Let caller determine whether to proceed.
                if (!(*proc)(&entry->data, data)) {
                    return false;
                }
            }
            queue_inc(entry);
        } while (entry != queue_next_free);
    } else {
        // queue may be empty. queue_first_used == queue_next_free means empty.
        UNABTO_ASSERT(!queue_full());
        entry = queue_first_used;
        while (entry != queue_next_free) {
            // If entry->state == APPREQ_FREE, we have an empty (already answered)
            // slot in the queue. Don't use it (requests are inserted in strict
            // order to start treatment in the same order).
            if (entry->state != APPREQ_FREE) {
                // Let caller determine whether to proceed.
                if (!(*proc)(&entry->data, data)) {
                    return false;
                }
            }
            queue_inc(entry);
        }
    }
#else  // NABTO_APPREQ_QUEUE_SIZE == 1
    if (!queue_empty()) {
        // Let caller determine whether to proceed.
        if (!(*proc)(&queue[0].data, data)) {
            return false;
        }
    }
#endif
    return true;
}
/// this is where it all starts, and ends
///
/// just run init(), then run an endless loop where we pass characters from
/// the serial RX buffer to gcode_parse_char() and check the clocks
int main (void) {
    init();

    // main loop
    for (;;) {
        // if queue is full, no point in reading chars; the host will just have to wait
        if ((serial_rxchars() != 0) && (queue_full() == 0)) {
            uint8_t c = serial_popchar();
            gcode_parse_char(c);
        }

        clock();
    }
}
int main() {
    queue *q = queue_create(8, NULL);
    assert(q != NULL);

    int a = 10;
    int b = 20;
    enqueue(q, &a);
    enqueue(q, &b);
    printf("size=%d\n", queue_size(q));
    assert(queue_full(q) != true);

    printf("%d\n", *(int *)dequeue(q));
    printf("%d\n", *(int *)dequeue(q));
    assert(queue_empty(q) != false);

    queue_release(q);
    return 0;
}
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
    int err;
    int i;
    u32 length;
    struct rxe_recv_wqe *recv_wqe;
    int num_sge = ibwr->num_sge;

    if (unlikely(queue_full(rq->queue))) {
        err = -ENOMEM;
        goto err1;
    }

    if (unlikely(num_sge > rq->max_sge)) {
        err = -EINVAL;
        goto err1;
    }

    length = 0;
    for (i = 0; i < num_sge; i++)
        length += ibwr->sg_list[i].length;

    recv_wqe = producer_addr(rq->queue);
    recv_wqe->wr_id = ibwr->wr_id;
    recv_wqe->num_sge = num_sge;

    memcpy(recv_wqe->dma.sge, ibwr->sg_list,
           num_sge * sizeof(struct ib_sge));

    recv_wqe->dma.length = length;
    recv_wqe->dma.resid = length;
    recv_wqe->dma.num_sge = num_sge;
    recv_wqe->dma.cur_sge = 0;
    recv_wqe->dma.sge_offset = 0;

    /* make sure all changes to the work queue are written before we
     * update the producer pointer
     */
    smp_wmb();

    advance_producer(rq->queue);
    return 0;

err1:
    return err;
}