/*
 * qw_pop -- remove the last (tail) node of the queue and hand its
 * payload back through *data.
 *
 * handle: opaque QW * queue handle.
 * data:   out parameter; receives the popped node's payload
 *         (NULL when the queue is empty).
 *
 * Returns 0 when a node was popped, -1 when the queue was empty.
 *
 * Fix: the original reported success via (*data != NULL), so popping a
 * node whose stored payload happened to be NULL freed the node but
 * still returned -1. Track whether a node was actually removed instead
 * of inspecting the payload value.
 */
int qw_pop(void *handle, void **data)
{
	struct qw_node *node;
	QW *qw_p = (QW *)handle;
	int rc = -1;

	*data = NULL;
	if (!TAILQ_EMPTY(&qw_p->h)) {
		node = TAILQ_LAST(&qw_p->h, qw_h);
		*data = node->d;
		TAILQ_REMOVE(&qw_p->h, node, entries);
		free(node);
		rc = 0;
	}
	return (rc);
}
/*
 * arm_remove_irqhandler -- detach a previously installed interrupt
 * handler from an ARM IRQ line.
 *
 * irq:    interrupt number, used to index the global intr_events table.
 * cookie: the handler cookie returned when the handler was installed.
 *
 * Returns the error code from intr_event_remove_handler().
 */
int arm_remove_irqhandler(int irq, void *cookie)
{
	struct intr_event *event;
	int error;

	event = intr_events[irq];
	/*
	 * Mask the line before touching the handler list so the handler
	 * being removed cannot fire mid-removal; ordering here matters.
	 */
	arm_mask_irq(irq);
	error = intr_event_remove_handler(cookie);
	/*
	 * Re-enable the line only if other handlers remain attached; an
	 * IRQ with no handlers is deliberately left masked.
	 */
	if (!TAILQ_EMPTY(&event->ie_handlers))
		arm_unmask_irq(irq);
	return (error);
}
/*
 * This function may be called to get next closable socket.
 * Parameters: None
 * Returns: the sk_user_data pointer of the socket to be closed, or
 *          NULL when no socket is pending close or its sk is NULL.
 *
 * Fix: `user_data` was returned uninitialized (undefined behavior)
 * whenever sock->sk was NULL; it now defaults to NULL.
 */
void *app_glue_get_next_closed()
{
	struct socket *sock;
	void *user_data = NULL;

	if (!TAILQ_EMPTY(&closed_socket_list_head)) {
		sock = TAILQ_FIRST(&closed_socket_list_head);
		sock->closed_queue_present = 0;
		TAILQ_REMOVE(&closed_socket_list_head, sock, closed_queue_entry);
		if (sock->sk)
			user_data = sock->sk->sk_user_data;
		/* kernel_close(sock); */
		return user_data;
	}
	return NULL;
}
/*
 * Tear down a queue cache policy: release every queued item, then the
 * policy object itself. Entry/exit is traced like the other policy
 * destructors.
 */
static void
destroy_cache_queue_policy(struct cache_queue_policy_ *queue_policy)
{
	struct cache_queue_policy_item_ *item;

	TRACE_IN(destroy_cache_queue_policy);
	for (item = TAILQ_FIRST(&queue_policy->head); item != NULL;
	    item = TAILQ_FIRST(&queue_policy->head)) {
		TAILQ_REMOVE(&queue_policy->head, item, entries);
		cache_queue_policy_destroy_item(
			(struct cache_policy_item_ *)item);
	}
	free(queue_policy);
	TRACE_OUT(destroy_cache_queue_policy);
}
/* * This function may be called to get next acceptable socket . * Paramters: None * Returns: pointer to socket on which to accept a new connection * */ void *app_glue_get_next_listener() { struct socket *sock; if(!TAILQ_EMPTY(&accept_ready_socket_list_head)) { sock = TAILQ_FIRST(&accept_ready_socket_list_head); sock->accept_queue_present = 0; TAILQ_REMOVE(&accept_ready_socket_list_head,sock,accept_queue_entry); if(sock->sk) return sock->sk->sk_user_data; ipaugenblick_log(IPAUGENBLICK_LOG_ERR,"PANIC: socket->sk is NULL\n"); return NULL; } return NULL; }
/*
 * Queue a floppy drive on its controller and kick the controller if it
 * was idle. Caller must hold the controller mutex.
 */
void
fdstart(struct fd_softc *fd)
{
	struct fdc_softc *fdc = device_private(device_parent(fd->sc_dev));
	int was_idle = TAILQ_EMPTY(&fdc->sc_drives);

	KASSERT(mutex_owned(&fdc->sc_mtx));

	/* Link into controller queue. */
	fd->sc_active = 1;
	TAILQ_INSERT_TAIL(&fdc->sc_drives, fd, sc_drivechain);

	/* If the controller had no work before this insert, start it. */
	if (was_idle)
		fdcstart(fdc);
}
void test_ofp_meter_band_list_elem_free(void) { int i; int max_cnt = 4; struct meter_band_list band_list; struct meter_band *band; TAILQ_INIT(&band_list); /* data */ for (i = 0; i < max_cnt; i++) { band = (struct meter_band *) malloc(sizeof(struct meter_band)); TAILQ_INSERT_TAIL(&band_list, band, entry); } TEST_ASSERT_EQUAL_MESSAGE(TAILQ_EMPTY(&band_list), false, "not band_list error."); /* Call func.*/ ofp_meter_band_list_elem_free(&band_list); TEST_ASSERT_EQUAL_MESSAGE(TAILQ_EMPTY(&band_list), true, "band_list error."); }
/*
 * Unlink u_elem from the runqueue's single task list and decrement the
 * queue's element count.
 *
 * Fix: removed the long-disabled "#if 0" priority/group bookkeeping
 * block and the locals (uprio, ugroup) only it referenced; they were
 * dead code generating unused-variable warnings.
 */
inline void
__rem_from_runqueue(runqueue_t *runq, uthread_struct_t *u_elem)
{
	uthread_head_t *uhead = &runq->tq_head;

	TAILQ_REMOVE(uhead, u_elem, uthread_runq);
	runq->uthread_tot--;
}
static void menu_draw(struct menu_ctx *mc, struct menu_q *menuq, struct menu_q *resultq) { struct screen_ctx *sc = mc->sc; struct menu *mi; struct geom xine; int n, xsave, ysave; if (mc->list) { if (TAILQ_EMPTY(resultq) && mc->list) { /* Copy them all over. */ TAILQ_FOREACH(mi, menuq, entry) TAILQ_INSERT_TAIL(resultq, mi, resultentry); mc->listing = 1; } else if (mc->changed) mc->listing = 0; } mc->num = 0; mc->width = 0; mc->height = 0; if (mc->hasprompt) { (void)snprintf(mc->dispstr, sizeof(mc->dispstr), "%s%s%s%s", mc->promptstr, PROMPT_SCHAR, mc->searchstr, PROMPT_ECHAR); mc->width = xu_xft_width(sc->xftfont, mc->dispstr, strlen(mc->dispstr)); mc->height = sc->xftfont->height + 1; mc->num = 1; } TAILQ_FOREACH(mi, resultq, resultentry) { char *text; if (mc->print != NULL) { (*mc->print)(mi, mc->listing); text = mi->print; } else { mi->print[0] = '\0'; text = mi->text; } mc->width = MAX(mc->width, xu_xft_width(sc->xftfont, text, MIN(strlen(text), MENU_MAXENTRY))); mc->height += sc->xftfont->height + 1; mc->num++; }
static void menu_draw(struct screen_ctx *sc, struct menu_ctx *mc, struct menu_q *menuq, struct menu_q *resultq) { struct menu *mi; XineramaScreenInfo *xine; int xmin, xmax, ymin, ymax; int n, dy, xsave, ysave; if (mc->list) { if (TAILQ_EMPTY(resultq) && mc->list) { /* Copy them all over. */ TAILQ_FOREACH(mi, menuq, entry) TAILQ_INSERT_TAIL(resultq, mi, resultentry); mc->listing = 1; } else if (mc->changed) mc->listing = 0; } mc->num = 0; mc->width = 0; dy = 0; if (mc->hasprompt) { (void)snprintf(mc->dispstr, sizeof(mc->dispstr), "%s%s%s", mc->promptstr, mc->searchstr, PROMPT_ECHAR); mc->width = font_width(sc, mc->dispstr, strlen(mc->dispstr)); dy = font_height(sc); mc->num = 1; } TAILQ_FOREACH(mi, resultq, resultentry) { char *text; if (mc->print != NULL) { (*mc->print)(mi, mc->listing); text = mi->print; } else { mi->print[0] = '\0'; text = mi->text; } mc->width = MAX(mc->width, font_width(sc, text, MIN(strlen(text), MENU_MAXENTRY))); dy += font_height(sc); mc->num++; }
/*
 * usdf_msg_send -- post a single-buffer message send on an endpoint.
 *
 * Takes a free work-queue entry, fills it with the caller's buffer and
 * completion preferences, posts it on the EP's wqe list, and pokes
 * domain progress. Returns 0 on success, -FI_EAGAIN when no free WQE
 * is available.
 *
 * Fix: the free-WQE emptiness check previously ran BEFORE taking
 * dom_progress_lock, so a concurrent sender could drain the list
 * between check and use, leaving TAILQ_FIRST() to return an invalid
 * pointer. The check now happens under the lock.
 */
ssize_t
usdf_msg_send(struct fid_ep *fep, const void *buf, size_t len, void *desc,
		fi_addr_t dest_addr, void *context)
{
	struct usdf_ep *ep;
	struct usdf_tx *tx;
	struct usdf_msg_qe *wqe;
	struct usdf_domain *udp;
	uint64_t op_flags;

	ep = ep_ftou(fep);
	tx = ep->ep_tx;
	udp = ep->ep_domain;

	pthread_spin_lock(&udp->dom_progress_lock);

	if (TAILQ_EMPTY(&tx->t.msg.tx_free_wqe)) {
		pthread_spin_unlock(&udp->dom_progress_lock);
		return -FI_EAGAIN;
	}

	wqe = TAILQ_FIRST(&tx->t.msg.tx_free_wqe);
	TAILQ_REMOVE(&tx->t.msg.tx_free_wqe, wqe, ms_link);

	wqe->ms_context = context;
	wqe->ms_iov[0].iov_base = (void *)buf;
	wqe->ms_iov[0].iov_len = len;
	wqe->ms_last_iov = 0;

	wqe->ms_cur_iov = 0;
	wqe->ms_cur_ptr = buf;
	wqe->ms_iov_resid = len;
	wqe->ms_resid = len;
	wqe->ms_length = len;

	/* Completion is signaled per EP default or per-op FI_COMPLETION. */
	op_flags = ep->ep_tx->tx_attr.op_flags;
	wqe->ms_signal_comp = ep->ep_tx_dflt_signal_comp ||
		(op_flags & FI_COMPLETION) ? 1 : 0;

	/* add send to EP, and add EP to TX list if not present */
	TAILQ_INSERT_TAIL(&ep->e.msg.ep_posted_wqe, wqe, ms_link);
	usdf_msg_ep_ready(ep);

	pthread_spin_unlock(&udp->dom_progress_lock);

	usdf_domain_progress(udp);

	return 0;
}
static struct conn * _conn_get(void) { struct conn *conn; if (!TAILQ_EMPTY(&free_connq)) { ASSERT(nfree_connq > 0); conn = TAILQ_FIRST(&free_connq); nfree_connq--; TAILQ_REMOVE(&free_connq, conn, conn_tqe); } else { conn = nc_alloc(sizeof(*conn)); if (conn == NULL) { return NULL; } } STAILQ_INIT(&conn->recv_queue); STAILQ_INIT(&conn->send_queue); conn->recv_queue_bytes = 0; conn->send_queue_bytes = 0; conn->owner = NULL; conn->data = NULL; conn->fd = -1; conn->send_bytes = 0; conn->recv_bytes = 0; conn->events = 0; conn->err = 0; conn->recv_active = 0; conn->recv_ready = 0; conn->send_active = 0; conn->send_ready = 0; conn->eof = 0; conn->done = 0; /* for client conn */ conn->recv = conn_recv; conn->send = conn_send; conn->close = conn_close; return conn; }
static void req_forward(struct context *ctx, struct conn *c_conn, struct msg *msg) { rstatus_t status; struct conn *s_conn; struct server_pool *pool; uint8_t *key; uint32_t keylen; struct keypos *kpos; ASSERT(c_conn->client && !c_conn->proxy); /* enqueue message (request) into client outq, if response is expected */ if (!msg->noreply) { c_conn->enqueue_outq(ctx, c_conn, msg); } pool = c_conn->owner; ASSERT(array_n(msg->keys) > 0); kpos = array_get(msg->keys, 0); key = kpos->start; keylen = (uint32_t)(kpos->end - kpos->start); s_conn = server_pool_conn(ctx, c_conn->owner, key, keylen); if (s_conn == NULL) { req_forward_error(ctx, c_conn, msg); return; } ASSERT(!s_conn->client && !s_conn->proxy); /* enqueue the message (request) into server inq */ if (TAILQ_EMPTY(&s_conn->imsg_q)) { status = event_add_out(ctx->evb, s_conn); if (status != NC_OK) { req_forward_error(ctx, c_conn, msg); s_conn->err = errno; return; } } s_conn->enqueue_inq(ctx, s_conn, msg); req_forward_stats(ctx, s_conn->owner, msg); log_debug(LOG_VERB, "forward from c %d to s %d req %"PRIu64" len %"PRIu32 " type %d with key '%.*s'", c_conn->sd, s_conn->sd, msg->id, msg->mlen, msg->type, keylen, key); }
// 'unprocessed buf': buf is full and has data unprocessed. // // 1. If last buf is nut full, it is returned. // 2. If `unprocessed` is true and the last buf is the unprocessed buf, // the last buf is returned. // 3. Otherwise a new buf is returned. struct mbuf *conn_get_buf(struct connection *conn, bool unprocessed) { struct mbuf *buf = NULL; struct conn_info *info = conn->info; if (!TAILQ_EMPTY(&info->data)) { buf = TAILQ_LAST(&info->data, mhdr); } if (buf == NULL || (unprocessed ? buf->pos : buf->last) >= buf->end) { buf = mbuf_get(conn->ctx); buf->queue = &info->data; TAILQ_INSERT_TAIL(&info->data, buf, next); } return buf; }
/*
 * 'unprocessed buf': buf is full and has data unprocessed.
 *
 * 1. If last buf is nut full, it is returned.
 * 2. If `unprocessed` is true and the last buf is the unprocessed buf,
 *    the last buf is returned.
 * 3. Otherwise a new buf is returned.
 *
 * `local` means whether to get buf from `info->local_data` or `info->data`.
 */
struct mbuf *conn_get_buf(struct connection *conn, bool unprocessed, bool local)
{
	struct mhdr *queue;
	struct mbuf *buf;
	bool need_new;

	queue = local ? &conn->info->local_data : &conn->info->data;
	buf = TAILQ_EMPTY(queue) ? NULL : TAILQ_LAST(queue, mhdr);

	/* Fullness is judged against the read cursor (`pos`) in
	 * unprocessed mode, against the write cursor (`last`) otherwise. */
	need_new = (buf == NULL) ||
		((unprocessed ? buf->pos : buf->last) >= buf->end);
	if (need_new) {
		buf = mbuf_get(conn->ctx);
		buf->queue = queue;
		TAILQ_INSERT_TAIL(queue, buf, next);
	}
	return buf;
}
/*
 * Destroy a list of bins: for every bin, free each of its entries
 * (path string included), then the bin's name and the bin itself.
 */
void bin_list_del(struct bin_head *head)
{
	struct bin *b;
	struct bin_entry *be;

	while ((b = LIST_FIRST(head)) != NULL) {
		while ((be = TAILQ_FIRST(&b->entries)) != NULL) {
			TAILQ_REMOVE(&b->entries, be, list);
			free(be->path);
			free(be);
		}
		LIST_REMOVE(b, list);
		free(b->name);
		free(b);
	}
}
/*
 * Release a sentinel server object: its name/pname strings, its
 * address (if any), and the structure itself. A NULL argument is a
 * no-op. The connection queue must already be empty.
 */
void
sentinel_deinit(struct server *sentinel)
{
	if (sentinel == NULL)
		return;

	ASSERT(TAILQ_EMPTY(&sentinel->s_conn_q) && sentinel->ns_conn_q == 0);

	string_deinit(&sentinel->name);
	string_deinit(&sentinel->pname);

	if (sentinel->addr != NULL)
		nc_free(sentinel->addr);

	nc_free(sentinel);
}
static int nfslock_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct __lock_msg *lm; mtx_lock(&nfslock_mtx); nfslock_isopen = 0; while (!TAILQ_EMPTY(&nfslock_list)) { lm = TAILQ_FIRST(&nfslock_list); /* XXX: answer request */ TAILQ_REMOVE(&nfslock_list, lm, lm_link); free(lm, M_NFSLOCK); } mtx_unlock(&nfslock_mtx); return (0); }
/*
 * scenario_free -- free the scenario structure and all its content
 */
void
scenario_free(struct scenario *s)
{
	struct kv *kv;

	assert(s != NULL);

	/* Drain and free every key/value pair first. */
	while ((kv = TAILQ_FIRST(&s->head)) != NULL) {
		TAILQ_REMOVE(&s->head, kv, next);
		kv_free(kv);
	}

	free(s->group);
	free(s->name);
	free(s->benchmark);
	free(s);
}
/*
 * usdf_msg_recvv -- post a vectored receive on an endpoint.
 *
 * Grabs a free receive queue entry, copies the caller's iovec into it,
 * and appends it to the posted-RQE list under the domain progress
 * lock. Returns 0 on success, -FI_EAGAIN when no free RQE exists.
 *
 * Fix: the free-RQE emptiness check previously ran BEFORE taking
 * dom_progress_lock, racing with concurrent receivers; the check now
 * happens under the lock so usdf_msg_get_rx_rqe() cannot be called on
 * an empty list.
 *
 * NOTE(review): iov[0] is dereferenced unconditionally — assumes
 * count >= 1; confirm callers guarantee this.
 */
ssize_t
usdf_msg_recvv(struct fid_ep *fep, const struct iovec *iov, void **desc,
		size_t count, fi_addr_t src_addr, void *context)
{
	struct usdf_ep *ep;
	struct usdf_rx *rx;
	struct usdf_msg_qe *rqe;
	struct usdf_domain *udp;
	size_t tot_len;
	uint64_t op_flags;
	uint32_t i;

	ep = ep_ftou(fep);
	rx = ep->ep_rx;
	udp = ep->ep_domain;

	pthread_spin_lock(&udp->dom_progress_lock);

	if (TAILQ_EMPTY(&rx->r.msg.rx_free_rqe)) {
		pthread_spin_unlock(&udp->dom_progress_lock);
		return -FI_EAGAIN;
	}

	rqe = usdf_msg_get_rx_rqe(rx);

	rqe->ms_context = context;
	tot_len = 0;
	for (i = 0; i < count; ++i) {
		rqe->ms_iov[i].iov_base = (void *)iov[i].iov_base;
		rqe->ms_iov[i].iov_len = iov[i].iov_len;
		tot_len += iov[i].iov_len;
	}
	rqe->ms_last_iov = count - 1;

	rqe->ms_cur_iov = 0;
	rqe->ms_cur_ptr = iov[0].iov_base;
	rqe->ms_iov_resid = iov[0].iov_len;
	rqe->ms_resid = tot_len;
	rqe->ms_length = 0;

	/* Completion is signaled per EP default or per-op FI_COMPLETION. */
	op_flags = ep->ep_rx->rx_attr.op_flags;
	rqe->ms_signal_comp = ep->ep_rx_dflt_signal_comp ||
		(op_flags & FI_COMPLETION) ? 1 : 0;

	TAILQ_INSERT_TAIL(&rx->r.msg.rx_posted_rqe, rqe, ms_link);

	pthread_spin_unlock(&udp->dom_progress_lock);

	return 0;
}
static void config_tag(void) { struct screen *s; struct tag *t; size_t i, n; struct conf_sec *sec, **ks, **mb; char *name, *tmp; int screenid; /* [tags] */ sec = fetch_section_first(NULL, "tags"); ks = fetch_section(sec, "tag"); n = fetch_section_count(ks); /* [mouse] */ if((mb = fetch_section(sec, "mouse"))) { config_mouse_section(&W->tmp_head.tag, mb); free(mb); } /* [tag] */ for(i = 0; i < n; ++i) { name = fetch_opt_first(ks[i], "tag", "name").str; screenid = fetch_opt_first(ks[i], "-1", "screen").num; SLIST_FOREACH(s, &W->h.screen, next) if(screenid == s->id || screenid == -1) { t = tag_new(s, name); t->statusctx = status_new_ctx(NULL, NULL); ISTRDUP(t->statusctx.status, fetch_opt_first(ks[i], "", "statusline").str); if(t->statusctx.status) status_parse(&t->statusctx); } } /* If no tag at all on a screen, add one anyway */ SLIST_FOREACH(s, &W->h.screen, next) if(TAILQ_EMPTY(&s->tags)) tag_new(s, "tag"); free(ks); }
/*
 * Block until a message is available in the thread mailbox, dequeue it
 * into *msg, and report whether it is a real work item.
 * Returns 0 for the shutdown sentinel (NULL function), 1 otherwise.
 */
int nif_thread_receive(nif_thread_state* st, nif_thread_message** msg)
{
	nif_thread_message* head;

	enif_mutex_lock(st->lock);

	/* Re-check after every wakeup: cond waits can wake spuriously. */
	while (TAILQ_EMPTY(st->mailbox))
		enif_cond_wait(st->cond, st->lock);

	head = TAILQ_FIRST(st->mailbox);
	TAILQ_REMOVE(st->mailbox, head, next_entry);

	enif_mutex_unlock(st->lock);

	*msg = head;
	return head->function != NULL;
}
static void * task_thread(void *arg) { struct kore_task *t; struct kore_task_thread *tt = arg; kore_debug("task_thread: #%d starting", tt->idx); pthread_mutex_lock(&(tt->lock)); pthread_mutex_lock(&task_thread_lock); TAILQ_INSERT_TAIL(&task_threads, tt, list); pthread_mutex_unlock(&task_thread_lock); for (;;) { if (TAILQ_EMPTY(&(tt->tasks))) pthread_cond_wait(&(tt->cond), &(tt->lock)); kore_debug("task_thread#%d: woke up", tt->idx); t = TAILQ_FIRST(&(tt->tasks)); TAILQ_REMOVE(&(tt->tasks), t, list); pthread_mutex_unlock(&(tt->lock)); pthread_mutex_lock(&task_thread_lock); TAILQ_REMOVE(&task_threads, tt, list); pthread_mutex_unlock(&task_thread_lock); kore_debug("task_thread#%d: executing %p", tt->idx, t); kore_task_set_state(t, KORE_TASK_STATE_RUNNING); kore_task_set_result(t, t->entry(t)); kore_task_finish(t); pthread_mutex_lock(&task_thread_lock); TAILQ_INSERT_HEAD(&task_threads, tt, list); pthread_mutex_unlock(&task_thread_lock); pthread_mutex_lock(&(tt->lock)); } pthread_exit(NULL); /* NOTREACHED */ return (NULL); }
/**
 * Move 'cnt' entries from 'srcq' to 'dstq'.
 * If 'cnt' == -1 all entries will be moved.
 * Returns the number of entries moved.
 *
 * With 'do_locks' set, both queue locks are taken for the duration
 * (srcq first, then dstq) and released in reverse order.
 * If either queue forwards to another queue (rkq_fwdq), the operation
 * recurses onto the forwarded queues with locking preserved.
 */
int rd_kafka_q_move_cnt (rd_kafka_q_t *dstq, rd_kafka_q_t *srcq,
			 int cnt, int do_locks) {
	rd_kafka_op_t *rko;
	int mcnt = 0;

	if (do_locks) {
		mtx_lock(&srcq->rkq_lock);
		mtx_lock(&dstq->rkq_lock);
	}

	if (!dstq->rkq_fwdq && !srcq->rkq_fwdq) {
		/* Optimization, if 'cnt' is equal/larger than all
		 * items of 'srcq' we can move the entire queue. */
		if (cnt == -1 || cnt >= (int)srcq->rkq_qlen) {
			/* Sanity: a non-empty tailq implies a positive qlen. */
			rd_dassert(TAILQ_EMPTY(&srcq->rkq_q) ||
				   srcq->rkq_qlen > 0);
			TAILQ_CONCAT(&dstq->rkq_q, &srcq->rkq_q, rko_link);
			mcnt = srcq->rkq_qlen;
			dstq->rkq_qlen += srcq->rkq_qlen;
			dstq->rkq_qsize += srcq->rkq_qsize;
			/* srcq is now empty; reset its counters. */
			rd_kafka_q_reset(srcq);
		} else {
			/* Partial move: transfer ops one by one, keeping
			 * qlen/qsize accounting consistent on both queues. */
			while (mcnt < cnt &&
			       (rko = TAILQ_FIRST(&srcq->rkq_q))) {
				TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link);
				TAILQ_INSERT_TAIL(&dstq->rkq_q, rko, rko_link);
				srcq->rkq_qlen--;
				dstq->rkq_qlen++;
				srcq->rkq_qsize -= rko->rko_len;
				dstq->rkq_qsize += rko->rko_len;
				mcnt++;
			}
		}
	} else
		/* One or both queues forward elsewhere: operate on the
		 * effective (forwarded) queues instead. Locks are not
		 * re-taken here since the recursion passes do_locks. */
		mcnt = rd_kafka_q_move_cnt(dstq->rkq_fwdq ? dstq->rkq_fwdq:dstq,
					   srcq->rkq_fwdq ? srcq->rkq_fwdq:srcq,
					   cnt, do_locks);

	if (do_locks) {
		mtx_unlock(&dstq->rkq_lock);
		mtx_unlock(&srcq->rkq_lock);
	}

	return mcnt;
}
static int audit_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct trigger_info *ti; /* Flush the queue of pending trigger events. */ mtx_lock(&audit_trigger_mtx); audit_isopen = 0; while (!TAILQ_EMPTY(&trigger_list)) { ti = TAILQ_FIRST(&trigger_list); TAILQ_REMOVE(&trigger_list, ti, list); free(ti, M_AUDITTRIGGER); } mtx_unlock(&audit_trigger_mtx); return (0); }
/*
 * Recursively free an attachment tree: children first, then this
 * node's type/name strings and the node itself.
 */
void
attach_free(struct attach *atr)
{
	struct attach *child;

	while ((child = TAILQ_FIRST(&atr->children)) != NULL) {
		TAILQ_REMOVE(&atr->children, child, entry);
		attach_free(child);
	}

	if (atr->type != NULL)
		xfree(atr->type);
	if (atr->name != NULL)
		xfree(atr->name);
	xfree(atr);
}
static void nvd_controller_fail(void *ctrlr_arg) { struct nvd_controller *ctrlr = ctrlr_arg; struct nvd_disk *disk; while (!TAILQ_EMPTY(&ctrlr->disk_head)) { disk = TAILQ_FIRST(&ctrlr->disk_head); TAILQ_REMOVE(&disk_head, disk, global_tailq); TAILQ_REMOVE(&ctrlr->disk_head, disk, ctrlr_tailq); destroy_geom_disk(disk); free(disk, M_NVD); } TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq); free(ctrlr, M_NVD); }
/*
 * _pthread_mutex_destroy -- destroy a mutex object.
 *
 * mutex: caller's pointer to the mutex; set to NULL on success so
 *        subsequent use is caught.
 *
 * Returns 0 on success, EINVAL for a NULL/already-destroyed mutex,
 * EBUSY when the mutex is locked, has waiters, or is referenced.
 */
int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread	*curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

		/*
		 * Check to see if this mutex is in use:
		 * owned, has queued waiters, or still referenced.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
		}
		else {
			/*
			 * Save a pointer to the mutex so it can be free'd
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}
/*
 * get_dbpath --
 *	Read the path of the database from man.conf and return.
 *	Returns NULL when the _mandb tag is absent or has no entries.
 */
char *
get_dbpath(const char *manconf)
{
	TAG *tp;

	config(manconf);
	tp = gettag("_mandb", 1);

	if (tp == NULL || TAILQ_EMPTY(&tp->entrylist))
		return NULL;

	/* The last entry wins. */
	return TAILQ_LAST(&tp->entrylist, tqh)->s;
}
/**
 * Finalize this table: drain the swins map, releasing each map entry,
 * its name, and (when present) the table entry it points at. The table
 * itself must be empty once the map is drained.
 */
void
fini_swins_tbl(void)
{
	struct swins_map_entry *me;

	while ((me = STAILQ_FIRST(&swins_map)) != NULL) {
		STAILQ_REMOVE_HEAD(&swins_map, link);

		if (me->entry != NULL) {
			TAILQ_REMOVE(&swins_tbl, me->entry, link);
			free(me->entry->name);
			free(me->entry);
		}

		free(me->name);
		free(me);
	}

	assert(TAILQ_EMPTY(&swins_tbl));
}