/*
 * Dequeue a single buffer header from a plain (non-scheduled) queue.
 *
 * @param queue  Queue to dequeue from.
 * @return Dequeued buffer header, or NULL if the queue is empty.
 *
 * Uses a lock-free peek of the head pointer to avoid taking the lock on
 * an empty queue, then re-checks under the lock before modifying state.
 */
odp_buffer_hdr_t *queue_deq(queue_entry_t *queue)
{
	odp_buffer_hdr_t *buf_hdr;

	/* Fast path: empty queue, no lock needed. The head may change
	 * concurrently, so this is only a hint; the locked re-check
	 * below is authoritative. */
	if (LOAD_PTR(queue->s.head) == NULL)
		return NULL;

	LOCK(queue);

	/* Re-read head under the lock; another thread may have emptied
	 * the queue between the peek above and acquiring the lock. */
	buf_hdr = LOAD_PTR(queue->s.head);
	if (buf_hdr == NULL) {
		UNLOCK(queue);
		return NULL;
	}

	/* NOTE(review): INVALIDATE presumably discards stale cached
	 * copies of the header before buf_hdr->next is read on
	 * non-coherent memory — confirm against the macro definition. */
	INVALIDATE(buf_hdr);
	STORE_PTR(queue->s.head, buf_hdr->next);

	if (buf_hdr->next == NULL) {
		/* Queue is now empty */
		STORE_PTR(queue->s.tail, NULL);
		/* Drop the queue back to not-scheduled so the scheduler
		 * stops polling it while it stays empty. */
		if (LOAD_S32(queue->s.status) == QUEUE_STATUS_SCHED)
			STORE_S32(queue->s.status, QUEUE_STATUS_NOTSCHED);
	}

	/* Detach the returned buffer from the queue's chain. */
	buf_hdr->next = NULL;

	UNLOCK(queue);
	return buf_hdr;
}
/*
 * Enqueue a single buffer header onto a queue.
 *
 * @param queue    Destination queue.
 * @param buf_hdr  Buffer header to append at the tail.
 * @return 0 on success, -1 if the queue status is below READY
 *         (bad queue or queue being destroyed).
 *
 * If the queue transitions from not-scheduled to scheduled, the queue is
 * handed to the scheduler AFTER the lock is released, to keep the
 * critical section short.
 */
int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
{
	int sched = 0;

	LOCK(queue);
	int status = LOAD_S32(queue->s.status);

	if (odp_unlikely(status < QUEUE_STATUS_READY)) {
		UNLOCK(queue);
		ODP_ERR("Bad queue status\n");
		return -1;
	}

	if (LOAD_PTR(queue->s.head) == NULL) {
		/* Empty queue */
		STORE_PTR(queue->s.head, buf_hdr);
		STORE_PTR(queue->s.tail, buf_hdr);
		buf_hdr->next = NULL;
	} else {
		/* Link the current tail to the new buffer, then advance
		 * the tail pointer. */
		STORE_PTR(((typeof(queue->s.tail))LOAD_PTR(queue->s.tail))->next,
			  buf_hdr);
		STORE_PTR(queue->s.tail, buf_hdr);
		buf_hdr->next = NULL;
	}

	if (status == QUEUE_STATUS_NOTSCHED) {
		STORE_S32(queue->s.status, QUEUE_STATUS_SCHED);
		sched = 1; /* retval: schedule queue */
	}

	UNLOCK(queue);

	/* Add queue to scheduling */
	if (sched)
		schedule_queue(queue);

	return 0;
}
/*
 * Dequeue up to 'num' buffer headers from a queue.
 *
 * @param queue    Queue to dequeue from.
 * @param buf_hdr  Output array, filled with up to 'num' dequeued headers.
 * @param num      Maximum number of headers to dequeue.
 * @return Number of headers dequeued (0 if the queue was empty),
 *         or -1 if the queue status is below READY.
 */
int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
{
	odp_buffer_hdr_t *hdr;
	int i;

	LOCK(queue);
	int status = LOAD_S32(queue->s.status);

	if (odp_unlikely(status < QUEUE_STATUS_READY)) {
		/* Bad queue, or queue has been destroyed.
		 * Scheduler finalizes queue destroy after this. */
		UNLOCK(queue);
		return -1;
	}

	hdr = LOAD_PTR(queue->s.head);

	if (hdr == NULL) {
		/* Already empty queue: drop it from scheduling so the
		 * scheduler stops polling it. */
		if (status == QUEUE_STATUS_SCHED)
			STORE_S32(queue->s.status, QUEUE_STATUS_NOTSCHED);

		UNLOCK(queue);
		return 0;
	}

	/* Walk the chain, detaching each header into the output array.
	 * INVALIDATE before reading hdr->next — presumably required for
	 * non-coherent memory; confirm against the macro definition. */
	for (i = 0; i < num && hdr; i++) {
		INVALIDATE(hdr);
		buf_hdr[i] = hdr;
		hdr = hdr->next;
		buf_hdr[i]->next = NULL;
	}

	STORE_PTR(queue->s.head, hdr);

	if (hdr == NULL) {
		/* Queue is now empty */
		STORE_PTR(queue->s.tail, NULL);
	}

	UNLOCK(queue);
	return i;
}
/*
 * Emit a long-sized operand into the program code area.
 *
 * @param l  Value to store at the current code pointer.
 *
 * Grows the A_PROGRAM memory block when fewer than sizeof(long) bytes
 * remain, then stores 'l' via STORE_PTR (which presumably advances
 * prog_code — confirm against the macro definition).
 */
static void ins_long P1(long, l)
{
	/* Fix: reserve sizeof(long) bytes rather than the magic constant 8,
	 * which silently assumes an 8-byte long. */
	if (prog_code + sizeof(long) > prog_code_max) {
		mem_block_t *mbp = &mem_block[A_PROGRAM];

		/* Sync the block's bookkeeping before reallocating, then
		 * recompute the code pointers against the (possibly moved)
		 * block base. */
		UPDATE_PROGRAM_SIZE;
		realloc_mem_block(mbp);

		prog_code = mbp->block + mbp->current_size;
		prog_code_max = mbp->block + mbp->max_size;
	}
	STORE_PTR(prog_code, l);
}
/*
 * Initialize a timer and its companion tick buffer.
 *
 * @param tim  Timer to initialize.
 * @param tb   Tick buffer paired with the timer.
 * @param _q   Destination queue for timeout events.
 * @param _up  Opaque user pointer stored with the timer.
 *
 * The atomic release store of exp_tck must remain the LAST write: it
 * publishes all prior initialization to other threads and marks the
 * timer inactive in one step.
 */
static void timer_init(odp_timer *tim, tick_buf_t *tb, odp_queue_t _q, void *_up)
{
	tim->queue = _q;
	tim->user_ptr = _up;
	/* No timeout buffer attached yet. */
	STORE_PTR(tb->tmo_buf, ODP_BUFFER_INVALID);
	/* All pad fields need a defined and constant value */
	/* Release the timer by setting timer state to inactive */
	_odp_atomic_u64_store_mm(&tb->exp_tck, TMO_INACTIVE, _ODP_MEMMODEL_RLS);
}
/*
 * Enqueue an array of 'num' buffer headers onto a queue.
 *
 * @param queue    Destination queue.
 * @param buf_hdr  Array of buffer headers to append, in order.
 * @param num      Number of headers in the array.
 * @return Number of headers enqueued (num, or 0 when num < 1),
 *         or -1 if the queue status is below READY.
 *
 * The headers are chained together outside the lock so the critical
 * section only splices the pre-built chain onto the tail.
 */
int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
{
	int sched = 0;
	int i;
	odp_buffer_hdr_t *tail;

	/* Fix: guard num < 1; the original read buf_hdr[num-1] below,
	 * which is out of bounds for num == 0. */
	if (odp_unlikely(num < 1))
		return 0;

	/* Build the chain outside the lock. */
	for (i = 0; i < num - 1; i++)
		buf_hdr[i]->next = buf_hdr[i+1];

	tail = buf_hdr[num-1];
	buf_hdr[num-1]->next = NULL;

	LOCK(queue);
	int status = LOAD_S32(queue->s.status);

	if (odp_unlikely(status < QUEUE_STATUS_READY)) {
		UNLOCK(queue);
		ODP_ERR("Bad queue status\n");
		return -1;
	}

	/* Empty queue */
	if (LOAD_PTR(queue->s.head) == NULL)
		STORE_PTR(queue->s.head, buf_hdr[0]);
	else
		STORE_PTR(((typeof(queue->s.tail))LOAD_PTR(queue->s.tail))->next,
			  buf_hdr[0]);

	STORE_PTR(queue->s.tail, tail);

	if (status == QUEUE_STATUS_NOTSCHED) {
		/* Fix: status is an s32 field (read with LOAD_S32 above);
		 * use STORE_S32 as queue_enq/queue_deq do, not STORE_PTR. */
		STORE_S32(queue->s.status, QUEUE_STATUS_SCHED);
		sched = 1; /* retval: schedule queue */
	}

	UNLOCK(queue);

	/* Add queue to scheduling */
	if (sched)
		schedule_queue(queue);

	return num; /* All events enqueued */
}