static int
pipestat(Chan *c, uchar *db, int n)
{
	Pipe *p;
	Dir dir;

	p = c->aux;

	switch(NETTYPE(c->qid.path)){
	case Qdir:
		devdir(c, c->qid, ".", 0, eve, DMDIR|0555, &dir);
		break;
	case Qdata0:
		devdir(c, c->qid, "data", qlen(p->q[0]), eve, p->perm, &dir);
		break;
	case Qdata1:
		devdir(c, c->qid, "data1", qlen(p->q[1]), eve, p->perm, &dir);
		break;
	default:
		panic("pipestat");
	}
	n = convD2M(&dir, db, n);
	if(n < BIT16SZ)
		error(Eshortstat);
	return n;
}
static int
pipegen(Chan *c, char *name, Dirtab *tab, int ntab, int i, Dir *dp)
{
	int id, len;
	Qid qid;
	Pipe *p;

	USED(name);
	if(i == DEVDOTDOT){
		/* the ".." entry is a directory, so it needs DMDIR set */
		devdir(c, c->qid, "#|", 0, eve, DMDIR|0555, dp);
		return 1;
	}
	i++;	/* skip . */
	if(tab==0 || i>=ntab)
		return -1;
	tab += i;
	p = c->aux;
	switch(NETTYPE(tab->qid.path)){
	case Qdata0:
		len = qlen(p->q[0]);
		break;
	case Qdata1:
		len = qlen(p->q[1]);
		break;
	default:
		len = tab->length;
		break;
	}
	id = NETID(c->qid.path);
	qid.path = NETQID(id, tab->qid.path);
	qid.vers = 0;
	qid.type = QTFILE;
	devdir(c, qid, tab->name, len, eve, tab->perm, dp);
	return 1;
}
static int
pipegen(Chan *c, char*, Dirtab *tab, int ntab, int i, Dir *dp)
{
	Qid q;
	int len;
	Pipe *p;

	if(i == DEVDOTDOT){
		devdir(c, c->qid, "#|", 0, eve, DMDIR|0555, dp);
		return 1;
	}
	i++;	/* skip . */
	if(tab==0 || i>=ntab)
		return -1;
	tab += i;
	p = c->aux;
	switch((ulong)tab->qid.path){
	case Qdata0:
		len = qlen(p->q[0]);
		break;
	case Qdata1:
		len = qlen(p->q[1]);
		break;
	default:
		len = tab->length;
		break;
	}
	mkqid(&q, NETQID(NETID(c->qid.path), tab->qid.path), 0, QTFILE);
	devdir(c, q, tab->name, len, eve, p->perm, dp);
	return 1;
}
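/*
 * Both pipegen() variants above pack a pipe instance id and a file type
 * into a single qid path via the netif macros.  A minimal sketch of those
 * macros, roughly as Plan 9's netif.h defines them (the 5-bit type field
 * width is that header's choice, stated here as an assumption):
 */
#define NETTYPE(x)	(((ulong)(x))&0x1f)		/* low 5 bits: file type (Qdir, Qdata0, ...) */
#define NETID(x)	(((ulong)(x))>>5)		/* remaining bits: pipe instance id */
#define NETQID(i,t)	((((ulong)(i))<<5)|(t))		/* pack id and type back into one path */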
static long
regressread(struct chan *c, void *va, long n, int64_t off)
{
	uint64_t w, *bp;
	char *a, *ea;
	uintptr_t offset = off;
	uint64_t pc;
	int snp_ret, ret = 0;

	switch ((int)c->qid.path) {
	case Monitordirqid:
		n = devdirread(c, va, n, regresstab, ARRAY_SIZE(regresstab),
			       devgen);
		break;
	case Monitorctlqid:
		n = readstr(off, va, n, ctlcommands);
		break;
	case Monitordataqid:
		if (regress.monitor) {
			/* print the length of the queue we actually read from */
			printd("monitordataqid: regress.monitor %p len %d\n",
			       regress.monitor, qlen(regress.monitor));
			if (qlen(regress.monitor) > 0)
				n = qread(regress.monitor, va, n);
			else
				n = 0;
		} else
			error(EFAIL, "no monitor queue");
		break;
	default:
		n = 0;
		break;
	}
	return n;
}
static int
pipestat(struct chan *c, uint8_t *db, int n)
{
	Pipe *p;
	struct dir dir;
	struct dirtab *tab;
	int perm;

	p = c->aux;
	tab = p->pipedir;

	switch (NETTYPE(c->qid.path)) {
	case Qdir:
		devdir(c, c->qid, ".", 0, eve, DMDIR | 0555, &dir);
		break;
	case Qdata0:
		perm = tab[1].perm;
		perm |= qreadable(p->q[0]) ? DMREADABLE : 0;
		perm |= qwritable(p->q[0]) ? DMWRITABLE : 0;
		devdir(c, c->qid, tab[1].name, qlen(p->q[0]), eve, perm, &dir);
		break;
	case Qdata1:
		perm = tab[2].perm;
		perm |= qreadable(p->q[1]) ? DMREADABLE : 0;
		perm |= qwritable(p->q[1]) ? DMWRITABLE : 0;
		devdir(c, c->qid, tab[2].name, qlen(p->q[1]), eve, perm, &dir);
		break;
	default:
		panic("pipestat");
	}
	n = convD2M(&dir, db, n);
	if (n < BIT16SZ)
		error(ENODATA, ERROR_FIXME);
	return n;
}
static void
tcq_purgeq(struct tcq_if *tif, struct tcq_class *cl, u_int32_t flow,
    u_int32_t *packets, u_int32_t *bytes)
{
	struct ifclassq *ifq = tif->tif_ifq;
	u_int32_t cnt = 0, len = 0, qlen;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if ((qlen = qlen(&cl->cl_q)) == 0)
		goto done;

	/* become regular mutex before freeing mbufs */
	IFCQ_CONVERT_LOCK(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_purgeq(cl->cl_rio, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_purgeq(cl->cl_red, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_purgeq(cl->cl_blue, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len);
	else
		_flushq_flow(&cl->cl_q, flow, &cnt, &len);

	if (cnt > 0) {
		VERIFY(qlen(&cl->cl_q) == (qlen - cnt));

		PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len);
		IFCQ_DROP_ADD(ifq, cnt, len);

		VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0);
		IFCQ_LEN(ifq) -= cnt;

		if (pktsched_verbose) {
			log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
			    "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n",
			    if_name(TCQIF_IFP(tif)), tcq_style(tif),
			    cl->cl_handle, cl->cl_pri, qlen, qlen(&cl->cl_q),
			    cnt, len, flow);
		}
	}
done:
	if (packets != NULL)
		*packets = cnt;
	if (bytes != NULL)
		*bytes = len;
}
static int
udpstate(Conv *c, char *state, int n)
{
	return snprint(state, n, "%s qin %d qout %d\n",
		c->inuse ? "Open" : "Closed",
		c->rq ? qlen(c->rq) : 0,
		c->wq ? qlen(c->wq) : 0);
}
int
codel_add_altq(struct ifnet *ifp, struct pf_altq *a)
{
	struct codel_if *cif;
	struct codel_opts *opts;

	if (ifp == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	opts = &a->pq_u.codel_opts;

	cif = malloc(sizeof(struct codel_if), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cif == NULL)
		return (ENOMEM);
	cif->cif_bandwidth = a->ifbandwidth;
	cif->cif_ifq = &ifp->if_snd;

	cif->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cif->cl_q == NULL) {
		free(cif, M_DEVBUF);
		return (ENOMEM);
	}

	if (a->qlimit == 0)
		a->qlimit = 50;	/* use default. */
	qlimit(cif->cl_q) = a->qlimit;
	qtype(cif->cl_q) = Q_CODEL;
	qlen(cif->cl_q) = 0;
	qsize(cif->cl_q) = 0;

	if (opts->target == 0)
		opts->target = 5;
	if (opts->interval == 0)
		opts->interval = 100;
	cif->codel.params.target = machclk_freq * opts->target / 1000;
	cif->codel.params.interval = machclk_freq * opts->interval / 1000;
	cif->codel.params.ecn = opts->ecn;
	cif->codel.stats.maxpacket = 256;

	cif->cl_stats.qlength = qlen(cif->cl_q);
	cif->cl_stats.qlimit = qlimit(cif->cl_q);

	/* keep the state in pf_altq */
	a->altq_disc = cif;

	return (0);
}
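/*
 * The assignments qlimit(...) = a->qlimit and qlen(...) = 0 above work
 * because the classq accessors are lvalue macros over class_queue_t
 * fields, not functions.  A minimal sketch, roughly as ALTQ's
 * altq_classq.h defines them (the exact field names are that header's,
 * stated here as an assumption):
 */
#define qtype(q)	(q)->qtype_	/* queue discipline type */
#define qlimit(q)	(q)->qlim_	/* maximum queue length, packets */
#define qlen(q)		(q)->qlen_	/* current queue length, packets */
#define qsize(q)	(q)->qsize_	/* current queue size, bytes */
#define qempty(q)	(qlen(q) == 0)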
static void
uartsetlength(int i)
{
	Uart *p;

	if(i > 0){
		p = uart[i];
		if(p && p->opens && p->iq)
			uartdir[1+3*i].length = qlen(p->iq);
	} else for(i = 0; i < uartnuart; i++){
		p = uart[i];
		if(p && p->opens && p->iq)
			uartdir[1+3*i].length = qlen(p->iq);
	}
}
int
consactive(void)
{
	if(serialoq)
		return qlen(serialoq) > 0;
	return 0;
}
/* ---------------------------------------------------------------------- */
void
intr_print_info(void)
{
	printf("sigio_blocked = %d\n", sigio_blocked);
	printf("num_block_sigio = %d\n", num_block_sigio);
	printf("num_unblock_sigio = %d\n", num_unblock_sigio);
	printf("new_connections_on = %d\n", new_connections_on);
	printf("intr_depth = %d\n", intr_depth);

#ifdef ONE_LISTENER
#ifdef TBB_QLENS
	printf("qlen_listenq = %d\n", qlen_listenq(server_sd));
	printf("  qlen_young = %d qlen = %d\n",
	    qlen_young(server_sd), qlen(server_sd));
#endif /* TBB_QLENS */
#endif /* ONE_LISTENER */

#ifdef SEND
#ifdef ARRAY_OF_BUFS
	send_print_info_array();
#else
	send_print_info();
#endif /* ARRAY_OF_BUFS */
#endif /* SEND */

	printf("-------------------------------------------------------------\n");
}
int
codel_addq(struct codel *c, class_queue_t *q, struct mbuf *m)
{
	struct m_tag *mtag;
	uint64_t *enqueue_time;

	if (qlen(q) < qlimit(q)) {
		/* stamp the packet with its enqueue time via an mbuf tag */
		mtag = m_tag_locate(m, MTAG_CODEL, 0, NULL);
		if (mtag == NULL)
			mtag = m_tag_alloc(MTAG_CODEL, 0, sizeof(uint64_t),
			    M_NOWAIT);
		if (mtag == NULL) {
			m_freem(m);
			return (-1);
		}
		enqueue_time = (uint64_t *)(mtag + 1);
		*enqueue_time = read_machclk();
		m_tag_prepend(m, mtag);
		_addq(q, m);
		return (0);
	}
	c->drop_overlimit++;
	m_freem(m);

	return (-1);
}
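/*
 * A minimal sketch (not the ALTQ source) of how the dequeue side would
 * recover the timestamp that codel_addq() stores above: locate the
 * MTAG_CODEL tag and compute the packet's sojourn time, which is what
 * CoDel compares against its target delay.  The helper name
 * codel_sojourn_time is hypothetical.
 */
static uint64_t
codel_sojourn_time(struct mbuf *m)
{
	struct m_tag *mtag;
	uint64_t enqueue_time;

	mtag = m_tag_locate(m, MTAG_CODEL, 0, NULL);
	if (mtag == NULL)
		return (0);	/* untagged: treat as zero queueing delay */
	enqueue_time = *(uint64_t *)(mtag + 1);
	return (read_machclk() - enqueue_time);
}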
int
ethertxpkt(int ctlrno, Etherpkt *pkt, int len, int)
{
	Ctlr *ctlr;
	Block *b;
	int s;

	if((ctlr = attach(ctlrno)) == 0)
		return 0;

	if(qlen(ctlr->oq) > 16*1024){
		print("ether%d: tx queue full\n", ctlrno);
		return 0;
	}
	b = iallocb(sizeof(Etherpkt));
	memmove(b->wp, pkt, len);
	memmove(((Etherpkt*)b->wp)->s, ctlr->card.ea, Eaddrlen);
	b->wp += len;
	qbwrite(ctlr->oq, b);

	s = splhi();
	(*ctlr->card.transmit)(ctlr);
	splx(s);

	return 1;
}
static void
rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;
	int			req_cnt = 0;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		/* break the nexus of continuous completion and re-submission */
		if (++req_cnt > qlen(dev->gadget))
			break;

		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			spin_lock_irqsave(&dev->req_lock, flags);
			list_add(&req->list, &dev->rx_reqs);
			spin_unlock_irqrestore(&dev->req_lock, flags);
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
/* copy the stats info in rm_class to class_states_t */
static void
get_class_stats(class_stats_t *statsp, struct rm_class *cl)
{
	statsp->xmit_cnt = cl->stats_.xmit_cnt;
	statsp->drop_cnt = cl->stats_.drop_cnt;
	statsp->over = cl->stats_.over;
	statsp->borrows = cl->stats_.borrows;
	statsp->overactions = cl->stats_.overactions;
	statsp->delays = cl->stats_.delays;

	statsp->depth = cl->depth_;
	statsp->priority = cl->pri_;
	statsp->maxidle = cl->maxidle_;
	statsp->minidle = cl->minidle_;
	statsp->offtime = cl->offtime_;
	statsp->qmax = qlimit(cl->q_);
	statsp->ns_per_byte = cl->ns_per_byte_;
	statsp->wrr_allot = cl->w_allotment_;
	statsp->qcnt = qlen(cl->q_);
	statsp->avgidle = cl->avgidle_;
	statsp->qtype = qtype(cl->q_);

#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		red_getstats(cl->red_, &statsp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
#endif
}
static int
priq_addq(struct priq_class *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q, m,
		    cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & PRCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}
static void
rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;
	int			req_cnt = 0;

	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		if (++req_cnt > qlen(dev->gadget))
			break;

		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			spin_lock_irqsave(&dev->req_lock, flags);
			list_add(&req->list, &dev->rx_reqs);
			spin_unlock_irqrestore(&dev->req_lock, flags);
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
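/*
 * In both rx_fill() variants, qlen() is not a queue-length read at all
 * but the u_ether helper that decides how many rx requests to keep in
 * flight.  A minimal sketch of the idea, assuming the older u_ether.c
 * form (the qmult module parameter and DEFAULT_QLEN constant belong to
 * that file; newer kernels pass qmult as a parameter instead):
 */
static inline int qlen(struct usb_gadget *gadget)
{
	/* dual-speed hardware gets deeper queues at high/super speed */
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}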
static int
consactive(void)
{
	if(printq)
		return qlen(printq) > 0;
	return 0;
}
static int
uartdrained(void* arg)
{
	Uart *p;

	p = arg;
	return qlen(p->oq) == 0 && p->op == p->oe;
}
static int
ip3gen(Chan *c, int i, Dir *dp)
{
	Qid q;
	Conv *cv;
	char *p;

	cv = ipfs[c->dev]->p[PROTO(c->qid)]->conv[CONV(c->qid)];
	if(cv->owner == nil)
		kstrdup(&cv->owner, eve);
	mkqid(&q, QID(PROTO(c->qid), CONV(c->qid), i), 0, QTFILE);
	switch(i) {
	default:
		return -1;
	case Qctl:
		devdir(c, q, "ctl", 0, cv->owner, cv->perm, dp);
		return 1;
	case Qdata:
		devdir(c, q, "data", qlen(cv->rq), cv->owner, cv->perm, dp);
		return 1;
	case Qerr:
		devdir(c, q, "err", qlen(cv->eq), cv->owner, cv->perm, dp);
		return 1;
	case Qlisten:
		devdir(c, q, "listen", 0, cv->owner, cv->perm, dp);
		return 1;
	case Qlocal:
		p = "local";
		break;
	case Qremote:
		p = "remote";
		break;
	case Qsnoop:
		if(strcmp(cv->p->name, "ipifc") != 0)
			return -1;
		devdir(c, q, "snoop", qlen(cv->sq), cv->owner, 0400, dp);
		return 1;
	case Qstatus:
		p = "status";
		break;
	}
	devdir(c, q, p, 0, cv->owner, 0444, dp);
	return 1;
}
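/*
 * ip3gen() relies on the devip qid layout: protocol number, conversation
 * number, and file type packed into one path.  A minimal sketch of the
 * macros, roughly as Plan 9's devip.c defines them (the exact field
 * widths are that file's choice, stated here as an assumption):
 */
#define TYPE(x)		( ((ulong)(x).path) & 0xf )
#define CONV(x)		( (((ulong)(x).path) >> 4) & 0xfff )
#define PROTO(x)	( (((ulong)(x).path) >> 16) & 0xff )
#define QID(p, c, y)	( ((p)<<16) | ((c)<<4) | (y) )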
Intervald Sphere::m_rlrp(Intervald t)
{
	double* m_rold = (double *) malloc(qlen() * sizeof(double));

	getqold(m_rold);
	// m_r resides in q[3] = m_rold[3]; interpolate from old toward
	// current, matching the LRP pattern used in m_xlrp() below
	Intervald retval = t * (m_r - m_rold[3]) + Intervald(m_rold[3]);
	free(m_rold);	// malloc'd storage must be free()d, not delete'd
	return retval;
}
static int
regressstat(struct chan *c, uint8_t *db, int n)
{
	if (regress.monitor)
		regresstab[Monitordataqid].length = qlen(regress.monitor);
	else
		regresstab[Monitordataqid].length = 0;

	return devstat(c, db, n, regresstab, ARRAY_SIZE(regresstab), devgen);
}
/**
 * Convenience function for the calculation of a LRP over a time interval
 * for a particular m_x. We use the "current" and the "last" coefficients
 * to calculate an interval of the coefficients over a given time interval.
 * This is used for 4D critical point finding.
 */
Box3d Sphere::m_xlrp(Intervald t)
{
	double* m_xold = (double *) malloc(qlen() * sizeof(double));
	Box3d retval;

	getqold(m_xold);	// fill m_xold; otherwise it is read uninitialized
	// m_x resides in q[0],q[1],q[2] = m_xold[0..2]
	for (int i = 0; i < 3; i++)
		retval[i] = t * (m_x[i] - m_xold[i]) + Intervald(m_xold[i]);
	free(m_xold);	// malloc'd storage must be free()d, not delete'd
	return retval;
}
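// A minimal sketch of the scalar interpolation that both m_xlrp() and
// m_rlrp() above apply per coefficient: blend from the "last" value toward
// the "current" one over the time interval t (t = [0,0] yields the old
// value, t = [1,1] the current one).  Intervald is the interval-arithmetic
// type used above; the helper name lrp1 is hypothetical.
static inline Intervald lrp1(Intervald t, double cur, double old)
{
	return t * (cur - old) + Intervald(old);
}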
static void
priq_purgeq(struct priq_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
		m_freem(m);
	}
	KKASSERT(qlen(cl->cl_q) == 0);
}
/* Throw away the next 'len' bytes in the queue returning the number actually
 * discarded.
 *
 * If the bytes are in the queue, then they must be discarded.  The only time
 * to return less than len is if the q itself has less than len bytes.
 */
size_t qdiscard(struct queue *q, size_t len)
{
	struct block *blist;
	size_t removed_amt;
	size_t sofar = 0;

	/* This is racy.  There could be multiple qdiscarders or other
	 * consumers, where the consumption could be interleaved. */
	while (qlen(q) && len) {
		blist = __qbread(q, len, 0, MEM_WAIT);
		removed_amt = freeblist(blist);
		sofar += removed_amt;
		len -= removed_amt;
	}
	return sofar;
}
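/*
 * A minimal usage sketch for qdiscard(): flush whatever is queued right
 * now.  As the comment above notes, this is racy, so the check only holds
 * if no other producer or consumer is active; the helper name
 * qflush_current is hypothetical.
 */
static void qflush_current(struct queue *q)
{
	size_t want = qlen(q);
	size_t got = qdiscard(q, want);

	assert(got <= want);	/* comes up short only if q had fewer bytes */
}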
// When loading, increase the degree if the number of parameters exceeds qlen
bool Algebraic::readImplicit(std::ifstream &file, bool verbose)
{
	std::vector<double> params;

	readParameters(file, params);
	if (params.size() != qlen()) {
		// params doesn't match qlen, change degree
		int newdegree = 0;
		while (Algebraic::coefficients(newdegree) < (int)params.size())
			newdegree++;
		degree(newdegree);
	}
	setq(params);
	return true;
}
static void
i8250kick(Uart* uart)
{
	int i;
	Ctlr *ctlr;

	if(/* uart->cts == 0 || */ uart->blocked)
		return;

	if(!normalprint) {		/* early */
		if (uart->op < uart->oe)
			emptyoutstage(uart, uart->oe - uart->op);
		while ((i = uartstageoutput(uart)) > 0)
			emptyoutstage(uart, i);
		return;
	}

	/* nothing more to send? then disable xmit intr */
	ctlr = uart->regs;
	if (uart->op >= uart->oe && qlen(uart->oq) == 0 &&
	    csr8r(ctlr, Lsr) & Temt) {
		ctlr->sticky[Ier] &= ~Ethre;
		csr8w(ctlr, Ier, 0);
		return;
	}

	/*
	 * 128 here is an arbitrary limit to make sure
	 * we don't stay in this loop too long.  If the
	 * chip's output queue is longer than 128, too
	 * bad -- presotto
	 */
	for(i = 0; i < 128; i++){
		if(!(csr8r(ctlr, Lsr) & Thre))
			break;
		if(uart->op >= uart->oe && uartstageoutput(uart) == 0)
			break;
		csr8o(ctlr, Thr, *uart->op++);	/* start tx */
		ctlr->sticky[Ier] |= Ethre;
		csr8w(ctlr, Ier, 0);		/* intr when done */
	}
}
static void
get_class_stats(struct priq_classstats *sp, struct priq_class *cl)
{
	sp->class_handle = cl->cl_handle;
	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->period = cl->cl_period;
	sp->xmitcnt = cl->cl_xmitcnt;
	sp->dropcnt = cl->cl_dropcnt;
	sp->qtype = qtype(cl->cl_q);

#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}
static int
priq_stat_sc(struct priq_if *pif, cqrq_stat_sc_t *sr)
{
	struct ifclassq *ifq = pif->pif_ifq;
	struct priq_class *cl;
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(sr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(sr->sc));

	i = MBUF_SCIDX(sr->sc);
	VERIFY(i < IFCQ_SC_MAX);

	cl = ifq->ifcq_disc_slots[i].cl;
	sr->packets = qlen(&cl->cl_q);
	sr->bytes = qsize(&cl->cl_q);

	return (0);
}
int
priq_get_class_stats(struct priq_if *pif, u_int32_t qid,
    struct priq_classstats *sp)
{
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	if ((cl = priq_clh_to_clp(pif, qid)) == NULL)
		return (EINVAL);

	sp->class_handle = cl->cl_handle;
	sp->priority = cl->cl_pri;
	sp->qlength = qlen(&cl->cl_q);
	sp->qlimit = qlimit(&cl->cl_q);
	sp->period = cl->cl_period;
	sp->xmitcnt = cl->cl_xmitcnt;
	sp->dropcnt = cl->cl_dropcnt;
	sp->qtype = qtype(&cl->cl_q);
	sp->qstate = qstate(&cl->cl_q);

#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_getstats(cl->cl_rio, &sp->red[0]);
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_getstats(cl->cl_blue, &sp->blue);
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_getstats(cl->cl_sfb, &sp->sfb);

	return (0);
}