Example #1
int upthread_cond_signal(upthread_cond_t *c)
{
	if(c == NULL)
		return EINVAL;

	mcs_lock_qnode_t qnode = {0};
	mcs_pdr_lock(&c->lock, &qnode);
	upthread_t upthread = STAILQ_FIRST(&c->queue);
	if(upthread)
		STAILQ_REMOVE_HEAD(&c->queue, next);
	mcs_pdr_unlock(&c->lock, &qnode);

	if (upthread != NULL) {
		uthread_runnable((struct uthread*)upthread);
	}
	return 0;
}
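Example #1 already shows the idiom every entry on this page repeats: peek at the head with STAILQ_FIRST, pop it with STAILQ_REMOVE_HEAD while holding the queue's lock, then act on the element after dropping the lock. A minimal self-contained sketch of that idiom against the standard <sys/queue.h> macros (struct node, pop_head and the queue are made up for illustration):

#include <pthread.h>
#include <stddef.h>
#include <sys/queue.h>

struct node {
	int value;
	STAILQ_ENTRY(node) next;	/* linkage used by the STAILQ_* macros */
};

static STAILQ_HEAD(, node) q = STAILQ_HEAD_INITIALIZER(q);
static pthread_mutex_t q_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Pop the head element, or return NULL if the queue is empty. */
static struct node *
pop_head(void)
{
	struct node *n;

	pthread_mutex_lock(&q_mtx);
	n = STAILQ_FIRST(&q);
	if (n != NULL)
		STAILQ_REMOVE_HEAD(&q, next);
	pthread_mutex_unlock(&q_mtx);
	return n;	/* the caller works on the node outside the lock */
}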
Example #2
static struct ntb_queue_entry *
ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list)
{
	struct ntb_queue_entry *entry;

	mtx_lock_spin(lock);
	if (STAILQ_EMPTY(list)) {
		entry = NULL;
		goto out;
	}
	entry = STAILQ_FIRST(list);
	STAILQ_REMOVE_HEAD(list, entry);
out:
	mtx_unlock_spin(lock);

	return (entry);
}
Example #3
static void *server_worker(void *arg)
{
	struct request *req;

	while (1) {
		pthread_mutex_lock(&req_mutex);
		while (STAILQ_EMPTY(&requestq)) {
			pthread_cond_wait(&req_cond, &req_mutex);
		}
		req = STAILQ_FIRST(&requestq);
		STAILQ_REMOVE_HEAD(&requestq, link);
		pthread_mutex_unlock(&req_mutex);
		server_process_request(req);
	}

	return NULL;
}
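server_worker is only the consumer half of the pattern; the producer side appends under the same mutex and signals the condition variable. A hedged sketch of that half, reusing requestq, req_mutex, req_cond and the link field from the example above (enqueue_request is a hypothetical name):

static void enqueue_request(struct request *req)
{
	pthread_mutex_lock(&req_mutex);
	STAILQ_INSERT_TAIL(&requestq, req, link);
	/* wake one worker blocked in pthread_cond_wait() above */
	pthread_cond_signal(&req_cond);
	pthread_mutex_unlock(&req_mutex);
}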
Example #4
/**
 * Flush a link layer packet queue.
 *
 * @param pktq Link layer packet queue to flush.
 */
static void
ble_ll_flush_pkt_queue(struct ble_ll_pkt_q *pktq)
{
    struct os_mbuf_pkthdr *pkthdr;
    struct os_mbuf *om;

    /* Flush all packets from Link layer queues */
    while (STAILQ_FIRST(pktq)) {
        /* Get mbuf pointer from packet header pointer */
        pkthdr = STAILQ_FIRST(pktq);
        om = OS_MBUF_PKTHDR_TO_MBUF(pkthdr);

        /* Remove from queue and free the mbuf */
        STAILQ_REMOVE_HEAD(pktq, omp_next);
        os_mbuf_free_chain(om);
    }
}
Example #5
static EP_STAT
_gdp_event_new(gdp_event_t **gevp)
{
	gdp_event_t *gev = NULL;

	ep_thr_mutex_lock(&FreeListMutex);
	if ((gev = STAILQ_FIRST(&FreeList)) != NULL)
		STAILQ_REMOVE_HEAD(&FreeList, queue);
	ep_thr_mutex_unlock(&FreeListMutex);
	if (gev == NULL)
	{
		gev = ep_mem_zalloc(sizeof *gev);
	}
	*gevp = gev;
	ep_dbg_cprintf(Dbg, 48, "_gdp_event_new => %p\n", gev);
	return EP_STAT_OK;
}
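_gdp_event_new pops a recycled event off FreeList and only allocates when the list is empty. The release path that makes this work would push the event back under the same mutex; a sketch assuming the queue linkage field used above (_gdp_event_recycle is a hypothetical name):

static void
_gdp_event_recycle(gdp_event_t *gev)
{
	if (gev == NULL)
		return;
	ep_thr_mutex_lock(&FreeListMutex);
	STAILQ_INSERT_HEAD(&FreeList, gev, queue);
	ep_thr_mutex_unlock(&FreeListMutex);
}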
Example #6
static INLINE LOG_ENTRY *GetQueueEntry(VOID)
{
    LOG_ENTRY *entry;

    EnterCriticalSection(&logLock);

    // Pop the first entry off the queue
    if (!STAILQ_EMPTY(&logQueue)) {
        entry = STAILQ_FIRST(&logQueue);
        STAILQ_REMOVE_HEAD(&logQueue, link);
    } else {
        entry = NULL;
    }

    LeaveCriticalSection(&logLock);
    return entry;
}
Example #7
int
rte_log_add_in_history(const char *buf, size_t size)
{
	struct log_history *hist_buf = NULL;
	static const unsigned hist_buf_size = LOG_ELT_SIZE - sizeof(*hist_buf);
	void *obj;

	if (history_enabled == 0)
		return 0;

	rte_spinlock_lock(&log_list_lock);

	/* get a buffer for adding in history */
	if (log_history_size > RTE_LOG_HISTORY) {
		hist_buf = STAILQ_FIRST(&log_history);
		STAILQ_REMOVE_HEAD(&log_history, next);
	}
	else {
		if (rte_mempool_mc_get(log_history_mp, &obj) < 0)
			obj = NULL;
		hist_buf = obj;
	}

	/* no buffer */
	if (hist_buf == NULL) {
		rte_spinlock_unlock(&log_list_lock);
		return -ENOBUFS;
	}

	/* not enough room for msg; buffer goes back into the mempool */
	if (size >= hist_buf_size) {
		rte_mempool_mp_put(log_history_mp, hist_buf);
		rte_spinlock_unlock(&log_list_lock);
		return -ENOBUFS;
	}

	/* add in history */
	memcpy(hist_buf->buf, buf, size);
	hist_buf->buf[size] = hist_buf->buf[hist_buf_size-1] = '\0';
	hist_buf->size = size;
	STAILQ_INSERT_TAIL(&log_history, hist_buf, next);
	log_history_size++;
	rte_spinlock_unlock(&log_list_lock);

	return 0;
}
Example #8
/**
 * Finalize this table
 */
void
fini_swins_tbl(void)
{
	struct swins_map_entry  *n1;

	while ((n1 = STAILQ_FIRST(&swins_map)) != NULL) {
		STAILQ_REMOVE_HEAD(&swins_map, link);
		if (n1->entry != NULL) {
			TAILQ_REMOVE(&swins_tbl, n1->entry, link);
			free(n1->entry->name);
			free(n1->entry);
		}
		free(n1->name);
		free(n1);
	}
	assert(TAILQ_EMPTY(&swins_tbl));
}
Example #9
/*
 * This routine will be called from ida_intr in order to queue up more
 * I/O, meaning that we may be in an interrupt context.  Hence, we should
 * not muck around with spl() in this routine.
 */
static void
ida_start(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
		if (ida->cmd.fifo_full(ida))
			break;
		STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
		/*
		 * XXX
		 * place the qcb on an active list and set a timeout?
		 */
		qcb->state = QCB_ACTIVE;
		ida->cmd.submit(ida, qcb);
	}
}
Example #10
void buf_time_append(struct context *ctx, struct buf_time_tqh *queue,
        struct mbuf *buf, int64_t read_time)
{
    struct buf_time *t;
    if (!STAILQ_EMPTY(&ctx->free_buf_timeq)) {
        t = STAILQ_FIRST(&ctx->free_buf_timeq);
        STAILQ_REMOVE_HEAD(&ctx->free_buf_timeq, next);
        ctx->mstats.free_buf_times--;
    } else {
        t = cv_calloc(1, sizeof(struct buf_time));
    }
    t->ctx = ctx;
    t->buf = buf;
    t->pos = buf->last;
    t->read_time = read_time;
    STAILQ_INSERT_TAIL(queue, t, next);
    ctx->mstats.buf_times++;
}
Example #11
void
enum_pairs_free(struct enum_pairs *headp)
{
	struct enum_pair *e;

	if (headp == NULL)
		return;

	while ((e = STAILQ_FIRST(headp)) != NULL) {
		STAILQ_REMOVE_HEAD(headp, link);

		if (e->enum_str)
			free(e->enum_str);
		free(e);
	}

	free(headp);
}
Example #12
/* Wait for a thread in the set to exit, and return its data pointer. */
void *
threads_wait(struct threads *tds)
{
	struct thread *td;
	void *data;

	threads_lock(tds);
	while (STAILQ_EMPTY(&tds->threads_dead)) {
		assert(!LIST_EMPTY(&tds->threads_running));
		pthread_cond_wait(&tds->thread_exited, &tds->threads_mtx);
	}
	td = STAILQ_FIRST(&tds->threads_dead);
	STAILQ_REMOVE_HEAD(&tds->threads_dead, deadlist);
	threads_unlock(tds);
	data = td->data;
	free(td);
	return (data);
}
Example #13
int
queue_destroy(struct Queue *q)
{
	struct QueueEntry *qi;

	assert(STAILQ_EMPTY(&q->queue));
	while (!STAILQ_EMPTY(&q->pool))
	{
		qi = STAILQ_FIRST(&q->pool);
		STAILQ_REMOVE_HEAD(&q->pool, entries);
		q->pool_length--;
		free(qi);
	}
	AZ(pthread_cond_destroy(&q->cv));
	AZ(pthread_mutex_destroy(&q->mutex));
	free(q);
	return 1;
}
Example #14
struct otus_cmd *
otus_get_cmdbuf(struct otus_softc *sc)
{
	struct otus_cmd *uc;

	OTUS_LOCK_ASSERT(sc);

	uc = STAILQ_FIRST(&sc->sc_cmd_inactive);
	if (uc != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_cmd_inactive, next);
		OTUS_STAT_DEC(sc, st_cmd_inactive);
	}
	if (uc == NULL)
		DPRINTF(sc, OTUS_DEBUG_CMDS, "%s: %s\n", __func__,
		    "out of command xmit buffers");
	return (uc);
}
Example #15
/**
 * Get rid of all data
 */
void
fini_fs_tbl(void)
{
	struct fs_map_entry *n1;

	while ((n1 = STAILQ_FIRST(&fs_map)) != NULL) {
		STAILQ_REMOVE_HEAD(&fs_map, link);
		if (n1->entry != NULL) {
			TAILQ_REMOVE(&fs_tbl, n1->entry, link);
			free(n1->entry->mountPoint);
			free(n1->entry->remoteMountPoint);
			free(n1->entry);
		}
		free(n1->a_name);
		free(n1);
	}
	assert(TAILQ_EMPTY(&fs_tbl));
}
Example #16
static int
virtio_net_cpy_to_user(message *m)
{
	/* Hmm, this looks so similar to cpy_from_user... TODO */
	int i, r, size, ivsz;
	int left = MAX_PACK_SIZE;	/* Try copying the whole packet */
	int bytes = 0;
	iovec_s_t iovec[NR_IOREQS];
	struct packet *p;

	/* This should only be called if recv_list has some entries */
	assert(!STAILQ_EMPTY(&recv_list));

	p = STAILQ_FIRST(&recv_list);
	STAILQ_REMOVE_HEAD(&recv_list, next);

	virtio_net_fetch_iovec(iovec, m, m->m_net_netdrv_dl_readv_s.grant,
		 m->m_net_netdrv_dl_readv_s.count);

	for (i = 0; i < m->m_net_netdrv_dl_readv_s.count && left > 0; i++) {
		ivsz = iovec[i].iov_size;
		size = left > ivsz ? ivsz : left;
		r = sys_safecopyto(m->m_source, iovec[i].iov_grant, 0,
				   (vir_bytes) p->vdata + bytes, size);

		if (r != OK)
			panic("%s: copy to %d failed (%d)", name,
							    m->m_source,
							    r);

		left -= size;
		bytes += size;
	}

	if (left != 0)
		dput(("Uhm... left=%d", left));

	/* Clean the packet */
	memset(p->vhdr, 0, sizeof(*p->vhdr));
	memset(p->vdata, 0, MAX_PACK_SIZE);
	STAILQ_INSERT_HEAD(&free_list, p, next);

	return bytes;
}
Example #17
int ui_msg_flush(int max)
{
   int i = 0;
   int old = 0;
   struct ui_message *msg;
  

   /* sanity checks */
   if (!GBL_UI->initialized)
      return 0;
     
   if (STAILQ_EMPTY(&messages_queue))
      return 0;

   // don't allow the thread to cancel while holding the ui mutex
   pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);

   /* the queue is updated by other threads */
   UI_MSG_LOCK;
   

   while ( (msg = STAILQ_FIRST(&messages_queue)) != NULL) {

      /* display the message */
      GBL_UI->msg(msg->message);

      STAILQ_REMOVE_HEAD(&messages_queue, next);
      /* free the message */
      SAFE_FREE(msg->message);
      SAFE_FREE(msg);
      
      /* do not display more than 'max' messages */
      if (++i == max)
         break;
   }
   
   UI_MSG_UNLOCK;

   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old);

   /* returns the number of displayed messages */
   return i;
}
Example #18
void
packet_free(packet_t *pkt)
{
	exthdr_t *eh;

	if (pkt->refcnt-- > 0)
		return;

	while ((eh = STAILQ_FIRST(&pkt->extlist)) != NULL) {
		STAILQ_REMOVE_HEAD(&pkt->extlist, next);
		free(eh);
	}

	pkt->chan->refcnt--;
	if (pkt->chan->refcnt == 0)
		channel_free(pkt->chan);

	free(pkt);
}
Example #19
static struct mbuf *_mbuf_get(struct context *ctx)
{
    struct mbuf *mbuf;
    uint8_t *buf;

    if (!STAILQ_EMPTY(&ctx->free_mbufq)) {
        mbuf = STAILQ_FIRST(&ctx->free_mbufq);
        STAILQ_REMOVE_HEAD(&ctx->free_mbufq, next);
        ctx->nfree_mbufq--;
    } else {
        buf = (uint8_t*)malloc(config.bufsize);
        if (buf == NULL) {
            return NULL;
        }

        mbuf = (struct mbuf *)(buf + ctx->mbuf_offset);
    }
    return mbuf;
}
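The counterpart to _mbuf_get would hand the buffer back to the per-context free list rather than releasing the memory; a minimal sketch reusing ctx->free_mbufq, nfree_mbufq and the next field from the example (_mbuf_put is a hypothetical name):

static void _mbuf_put(struct context *ctx, struct mbuf *mbuf)
{
    /* return the buffer to the free list so _mbuf_get() can reuse it */
    STAILQ_INSERT_HEAD(&ctx->free_mbufq, mbuf, next);
    ctx->nfree_mbufq++;
}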
Example #20
static void
smu_doorbell_intr(void *xdev)
{
	device_t smu;
	struct smu_softc *sc;
	int doorbell_ack;

	smu = xdev;
	doorbell_ack = macgpio_read(smu_doorbell);
	sc = device_get_softc(smu);

	if (doorbell_ack != (GPIO_DDR_OUTPUT | GPIO_LEVEL_RO | GPIO_DATA)) 
		return;

	mtx_lock(&sc->sc_mtx);

	if (sc->sc_cur_cmd == NULL)	/* spurious */
		goto done;

	/* Check result. First invalidate the cache again... */
	__asm __volatile("dcbf 0,%0; sync" :: "r"(sc->sc_cmd) : "memory");
	
	bus_dmamap_sync(sc->sc_dmatag, sc->sc_cmd_dmamap, BUS_DMASYNC_POSTREAD);

	sc->sc_cur_cmd->cmd = sc->sc_cmd->cmd;
	sc->sc_cur_cmd->len = sc->sc_cmd->len;
	memcpy(sc->sc_cur_cmd->data, sc->sc_cmd->data,
	    sizeof(sc->sc_cmd->data));
	wakeup(sc->sc_cur_cmd);
	sc->sc_cur_cmd = NULL;
	if (sc->sc_u3)
		powerpc_pow_enabled = 1;

    done:
	/* Queue next command if one is pending */
	if (STAILQ_FIRST(&sc->sc_cmdq) != NULL) {
		sc->sc_cur_cmd = STAILQ_FIRST(&sc->sc_cmdq);
		STAILQ_REMOVE_HEAD(&sc->sc_cmdq, cmd_q);
		smu_send_cmd(smu, sc->sc_cur_cmd);
	}

	mtx_unlock(&sc->sc_mtx);
}
Example #21
void dfs_reset_arq(struct ath_dfs_host *dfs)
{
	struct dfs_event *event;

	if (dfs == NULL) {
		A_PRINTF("%s: sc_dfs is NULL\n", __func__);
		return;
	}
	ATH_ARQ_LOCK(dfs);
	ATH_DFSEVENTQ_LOCK(dfs);
	while (!STAILQ_EMPTY(&(dfs->dfs_arq))) {
		event = STAILQ_FIRST(&(dfs->dfs_arq));
		STAILQ_REMOVE_HEAD(&(dfs->dfs_arq), re_list);
		OS_MEMZERO(event, sizeof(struct dfs_event));
		STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), event, re_list);
	}
	ATH_DFSEVENTQ_UNLOCK(dfs);
	ATH_ARQ_UNLOCK(dfs);
}
Example #22
static int
cuda_send(void *cookie, int poll, int length, uint8_t *msg)
{
	struct cuda_softc *sc = cookie;
	device_t dev = sc->sc_dev;
	struct cuda_packet *pkt;

	if (sc->sc_state == CUDA_NOTREADY)
		return (-1);

	mtx_lock(&sc->sc_mutex);

	pkt = STAILQ_FIRST(&sc->sc_freeq);
	if (pkt == NULL) {
		mtx_unlock(&sc->sc_mutex);
		return (-1);
	}

	pkt->len = length - 1;
	pkt->type = msg[0];
	memcpy(pkt->data, &msg[1], pkt->len);

	STAILQ_REMOVE_HEAD(&sc->sc_freeq, pkt_q);
	STAILQ_INSERT_TAIL(&sc->sc_outq, pkt, pkt_q);

	/*
	 * If we already are sending a packet, we should bail now that this
	 * one has been added to the queue.
	 */

	if (sc->sc_waiting) {
		mtx_unlock(&sc->sc_mutex);
		return (0);
	}

	cuda_send_outbound(sc);
	mtx_unlock(&sc->sc_mutex);

	if (sc->sc_polling || poll || cold)
		cuda_poll(dev);

	return (0);
}
Example #23
int upthread_cond_broadcast(upthread_cond_t *c)
{
	if(c == NULL)
		return EINVAL;

	mcs_lock_qnode_t qnode = {0};
	while(1) {
		mcs_pdr_lock(&c->lock, &qnode);
		upthread_t upthread = STAILQ_FIRST(&c->queue);
		if(upthread)
			STAILQ_REMOVE_HEAD(&c->queue, next);
		else break;
		mcs_pdr_unlock(&c->lock, &qnode);
		uthread_runnable((struct uthread*)upthread);
		memset(&qnode, 0, sizeof(mcs_lock_qnode_t));
	}
	mcs_pdr_unlock(&c->lock, &qnode);
	return 0;
}
Example #24
void
otus_wakeup_waiting_list(struct otus_softc *sc)
{
	struct otus_cmd *c;

	OTUS_LOCK_ASSERT(sc);

	while ((c = STAILQ_FIRST(&sc->sc_cmd_pending)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_cmd_pending, next);
		OTUS_STAT_DEC(sc, st_cmd_pending);

		/* Wake up the sleepers */
		wakeup(c);

		STAILQ_INSERT_TAIL(&sc->sc_cmd_inactive, c, next);
		OTUS_STAT_INC(sc, st_cmd_inactive);
	}
}
Example #25
static int
fw_read_async(struct fw_drv1 *d, struct uio *uio, int ioflag)
{
	int err = 0, s;
	struct fw_xfer *xfer;
	struct fw_bind *fwb;
	struct fw_pkt *fp;
	struct tcode_info *tinfo;

	FW_GLOCK(d->fc);
	while ((xfer = STAILQ_FIRST(&d->rq)) == NULL && err == 0)
		err = msleep(&d->rq, FW_GMTX(d->fc), FWPRI, "fwra", 0);

	if (err != 0) {
		FW_GUNLOCK(d->fc);
		return (err);
	}

	s = splfw();
	STAILQ_REMOVE_HEAD(&d->rq, link);
	FW_GUNLOCK(xfer->fc);
	splx(s);
	fp = &xfer->recv.hdr;
#if 0 /* for GASP ?? */
	if (fc->irx_post != NULL)
		fc->irx_post(fc, fp->mode.ld);
#endif
	tinfo = &xfer->fc->tcode[fp->mode.hdr.tcode];
	err = uiomove(fp, tinfo->hdr_len, uio);
	if (err)
		goto out;
	err = uiomove(xfer->recv.payload, xfer->recv.pay_len, uio);

out:
	/* recycle this xfer */
	fwb = (struct fw_bind *)xfer->sc;
	fw_xfer_unload(xfer);
	xfer->recv.pay_len = PAGE_SIZE;
	FW_GLOCK(xfer->fc);
	STAILQ_INSERT_TAIL(&fwb->xferlist, xfer, link);
	FW_GUNLOCK(xfer->fc);
	return (err);
}
Example #26
/* Called from the rl_task context, it acquires the first
 * task from the rl_op_list and calls the relevant functions according to
 * the needed operation. */
void mlx4_en_async_rl_operation(void *context, int pending)
{
	struct mlx4_en_priv			*priv;
	struct mlx4_en_rl_task_list_element	*rl_item;
	enum mlx4_en_rl_operation		rl_operation;
	int					ring_id;
	u8					rate_index;

	priv = context;

	while (pending) {
		/* Check for an available operation in the operation list */
		spin_lock(&priv->rl_op_lock);
		if ((rl_item = STAILQ_FIRST(&priv->rl_op_list_head))) {
			ring_id = rl_item->ring_id;
			rl_operation = rl_item->operation;
			rate_index = rl_item->rate_index;
			STAILQ_REMOVE_HEAD(&priv->rl_op_list_head, entry);
			spin_unlock(&priv->rl_op_lock);
			kfree(rl_item);
		}
		else {
			spin_unlock(&priv->rl_op_lock);
			pr_err("No avaliable rate limit item \n");
			return;
		}

		switch (rl_operation) {
			case MLX4_EN_RL_ADD:
				mlx4_en_create_rl_res(priv, ring_id, rate_index);
				break;
			case MLX4_EN_RL_DEL:
				mlx4_en_destroy_rl_res(priv, ring_id);
				break;
			case MLX4_EN_RL_MOD:
				mlx4_en_modify_rl_res(priv, ring_id, rate_index);
				break;
			default:
				pr_err("Not supported operation - %d \n", rl_operation);
		}
		pending--;
	}
}
Example #27
static struct ntb_queue_entry *
ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from,
    struct ntb_queue_list *to)
{
	struct ntb_queue_entry *entry;

	mtx_lock_spin(lock);
	if (STAILQ_EMPTY(from)) {
		entry = NULL;
		goto out;
	}
	entry = STAILQ_FIRST(from);
	STAILQ_REMOVE_HEAD(from, entry);
	STAILQ_INSERT_TAIL(to, entry, entry);

out:
	mtx_unlock_spin(lock);
	return (entry);
}
Example #28
void
dfs_reset_radarq(struct ath_dfs *dfs)
{
   struct dfs_event *event;
   if (dfs == NULL) {
      DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: sc_dfs is NULL", __func__);
      return;
   }
   ATH_DFSQ_LOCK(dfs);
   ATH_DFSEVENTQ_LOCK(dfs);
   while (!STAILQ_EMPTY(&(dfs->dfs_radarq))) {
      event = STAILQ_FIRST(&(dfs->dfs_radarq));
      STAILQ_REMOVE_HEAD(&(dfs->dfs_radarq), re_list);
      OS_MEMZERO(event, sizeof(struct dfs_event));
      STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), event, re_list);
   }
   ATH_DFSEVENTQ_UNLOCK(dfs);
   ATH_DFSQ_UNLOCK(dfs);
}
Example #29
/*
 * Prepare the next command buffer on the top of the
 * active stack for transmission to the target.
 *
 * Return 0 if no buffer was found, 1 if a buffer was
 * found and prepared, and < 0 on error.
 */
int
otus_comp_cmdbuf(struct otus_softc *sc)
{
	struct otus_cmd *cmd;

	OTUS_LOCK_ASSERT(sc);
	cmd = STAILQ_FIRST(&sc->sc_cmd_active);
	/* Nothing there? Fall through */
	if (cmd == NULL)
		return (0);
	STAILQ_REMOVE_HEAD(&sc->sc_cmd_active, next);
	OTUS_STAT_DEC(sc, st_cmd_active);
	STAILQ_INSERT_TAIL((cmd->flags & OTUS_CMD_FLAG_READ) ?
	    &sc->sc_cmd_waiting : &sc->sc_cmd_inactive, cmd, next);
	if (cmd->flags & OTUS_CMD_FLAG_READ)
		OTUS_STAT_INC(sc, st_cmd_waiting);
	else
		OTUS_STAT_INC(sc, st_cmd_inactive);
	return (1);
}
Example #30
void server_make_iov(struct conn_info *info)
{
    struct command *cmd;
    int64_t t = get_time();

    while (!STAILQ_EMPTY(&info->ready_queue)) {
        if (info->iov.len - info->iov.cursor > CORVUS_IOV_MAX) {
            break;
        }
        cmd = STAILQ_FIRST(&info->ready_queue);
        STAILQ_REMOVE_HEAD(&info->ready_queue, ready_next);
        STAILQ_NEXT(cmd, ready_next) = NULL;

        if (cmd->stale) {
            cmd_free(cmd);
            continue;
        }

        if (info->readonly) {
            cmd_iov_add(&info->iov, (void*)req_readonly, strlen(req_readonly), NULL);
            info->readonly = false;
            info->readonly_sent = true;
        }

        if (cmd->asking) {
            cmd_iov_add(&info->iov, (void*)req_ask, strlen(req_ask), NULL);
        }
        cmd->rep_time[0] = t;
        if (cmd->parent) {
            int64_t parent_rep_start_time = cmd->parent->rep_time[0];
            if (parent_rep_start_time == 0 || parent_rep_start_time > t)
                cmd->parent->rep_time[0] = t;
        }

        if (cmd->prefix != NULL) {
            cmd_iov_add(&info->iov, (void*)cmd->prefix, strlen(cmd->prefix), NULL);
        }
        cmd_create_iovec(cmd->req_buf, &info->iov);
        STAILQ_INSERT_TAIL(&info->waiting_queue, cmd, waiting_next);
    }
}
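To close the collection, a complete single-threaded miniature that exercises the insert/drain cycle all of the examples above build on (every name in it is made up for the demo; it should build with any C compiler on a system that ships <sys/queue.h>):

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct item {
	int id;
	STAILQ_ENTRY(item) entries;
};

int main(void)
{
	STAILQ_HEAD(, item) q = STAILQ_HEAD_INITIALIZER(q);
	struct item *it;
	int i;

	/* fill the queue */
	for (i = 0; i < 3; i++) {
		it = malloc(sizeof(*it));
		if (it == NULL)
			abort();
		it->id = i;
		STAILQ_INSERT_TAIL(&q, it, entries);
	}

	/* drain it with the FIRST/REMOVE_HEAD idiom used throughout */
	while ((it = STAILQ_FIRST(&q)) != NULL) {
		STAILQ_REMOVE_HEAD(&q, entries);
		printf("popped %d\n", it->id);
		free(it);
	}
	return 0;
}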