/*
 * priq_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
priq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
        struct ifaltq *ifq = ifsq->ifsq_altq;
        struct priq_if *pif = (struct priq_if *)ifq->altq_disc;
        struct priq_class *cl;
        int error;
        int len;

        if (ifsq_get_index(ifsq) != PRIQ_SUBQ_INDEX) {
                /*
                 * Race happened, the unrelated subqueue was
                 * picked during the packet scheduler transition.
                 */
                ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
                m_freem(m);
                return ENOBUFS;
        }

        crit_enter();

        /* grab class set by classifier */
        if ((m->m_flags & M_PKTHDR) == 0) {
                /* should not happen */
                if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
                m_freem(m);
                error = ENOBUFS;
                goto done;
        }

        if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
                cl = clh_to_clp(pif, m->m_pkthdr.pf.qid);
        else
                cl = NULL;
        if (cl == NULL) {
                cl = pif->pif_default;
                if (cl == NULL) {
                        m_freem(m);
                        error = ENOBUFS;
                        goto done;
                }
        }
        cl->cl_pktattr = NULL;
        len = m_pktlen(m);
        if (priq_addq(cl, m) != 0) {
                /* drop occurred.  mbuf was freed in priq_addq. */
                PKTCNTR_ADD(&cl->cl_dropcnt, len);
                error = ENOBUFS;
                goto done;
        }
        ifsq->ifq_len++;
        error = 0;
done:
        crit_exit();
        return (error);
}
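/*
 * Standalone sketch (not ALTQ code) of the classification fallback used in
 * priq_enqueue() above: a packet tagged by the firewall carries a queue id,
 * which is mapped to a class (the role clh_to_clp() plays above); untagged
 * packets or unknown ids fall back to the discipline's default class, and
 * the packet is dropped if neither exists.  The structures and helper names
 * below are illustrative assumptions, not part of the ALTQ API.
 */
#include <stddef.h>
#include <stdio.h>

#define SKETCH_MAXPRI   16

struct sketch_class {
        int qid;        /* queue id assigned by the firewall ruleset */
        int pri;        /* priority of this class */
};

struct sketch_pif {
        struct sketch_class *classes[SKETCH_MAXPRI];
        struct sketch_class *default_class;
};

/* Map a firewall-assigned queue id to a class, or NULL if unknown. */
static struct sketch_class *
sketch_qid_to_class(struct sketch_pif *pif, int qid)
{
        int i;

        for (i = 0; i < SKETCH_MAXPRI; i++) {
                if (pif->classes[i] != NULL && pif->classes[i]->qid == qid)
                        return pif->classes[i];
        }
        return NULL;
}

/* Pick the class for a packet: tagged id first, then the default class. */
static struct sketch_class *
sketch_classify(struct sketch_pif *pif, int tagged, int qid)
{
        struct sketch_class *cl = NULL;

        if (tagged)
                cl = sketch_qid_to_class(pif, qid);
        if (cl == NULL)
                cl = pif->default_class;        /* may still be NULL: drop */
        return cl;
}

int
main(void)
{
        struct sketch_class bulk = { 1, 0 }, interactive = { 2, 7 };
        struct sketch_pif pif = { { &bulk, &interactive }, &bulk };

        printf("qid 2 -> pri %d\n", sketch_classify(&pif, 1, 2)->pri);
        printf("untagged -> pri %d\n", sketch_classify(&pif, 0, 0)->pri);
        return 0;
}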
struct mbuf *
tbr_dequeue(struct ifaltq_subque *ifsq, int op)
{
        struct ifaltq *ifq = ifsq->ifsq_altq;
        struct tb_regulator *tbr;
        struct mbuf *m;
        int64_t interval;
        uint64_t now;

        if (ifsq_get_index(ifsq) != ALTQ_SUBQ_INDEX_DEFAULT) {
                /*
                 * Race happened, the unrelated subqueue was
                 * picked during the packet scheduler transition.
                 */
                ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
                return NULL;
        }

        crit_enter();
        tbr = ifq->altq_tbr;

        if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
                /* if this is a remove after poll, bypass tbr check */
        } else {
                /* update token only when it is negative */
                if (tbr->tbr_token <= 0) {
                        now = read_machclk();
                        interval = now - tbr->tbr_last;
                        if (interval >= tbr->tbr_filluptime)
                                tbr->tbr_token = tbr->tbr_depth;
                        else {
                                tbr->tbr_token += interval * tbr->tbr_rate;
                                if (tbr->tbr_token > tbr->tbr_depth)
                                        tbr->tbr_token = tbr->tbr_depth;
                        }
                        tbr->tbr_last = now;
                }
                /* if token is still negative, don't allow dequeue */
                if (tbr->tbr_token <= 0) {
                        crit_exit();
                        return (NULL);
                }
        }

        if (ifq_is_enabled(ifq))
                m = (*ifsq->ifsq_dequeue)(ifsq, op);
        else
                m = ifsq_classic_dequeue(ifsq, op);

        if (m != NULL && op == ALTDQ_REMOVE)
                tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
        tbr->tbr_lastop = op;

        crit_exit();
        return (m);
}
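/*
 * Standalone sketch (not kernel code) of the token-bucket arithmetic used in
 * tbr_dequeue() above: tokens are kept in fixed point, replenished as
 * interval * rate and clamped to the bucket depth, and a dequeue is only
 * allowed while the token count is positive, after which the packet length
 * is charged against the bucket.  The 32-bit scaling shift, the structure,
 * and the helper names are illustrative assumptions; the tbr_filluptime
 * fast path from the real code is omitted for brevity.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_TBR_SHIFT        32
#define SKETCH_TBR_SCALE(x)     ((int64_t)(x) << SKETCH_TBR_SHIFT)

struct sketch_tbr {
        int64_t  token;         /* current tokens, scaled */
        int64_t  depth;         /* bucket depth, scaled */
        int64_t  rate;          /* scaled tokens per clock tick */
        uint64_t last;          /* clock value at last refill */
};

/* Refill tokens for the elapsed interval, clamping at the bucket depth. */
static void
sketch_tbr_refill(struct sketch_tbr *tbr, uint64_t now)
{
        int64_t interval = now - tbr->last;

        tbr->token += interval * tbr->rate;
        if (tbr->token > tbr->depth)
                tbr->token = tbr->depth;
        tbr->last = now;
}

/* Return 1 if a packet of pktlen bytes may pass now, charging its cost. */
static int
sketch_tbr_dequeue_ok(struct sketch_tbr *tbr, uint64_t now, int pktlen)
{
        if (tbr->token <= 0)
                sketch_tbr_refill(tbr, now);
        if (tbr->token <= 0)
                return 0;       /* still out of tokens; caller must wait */
        tbr->token -= SKETCH_TBR_SCALE(pktlen);
        return 1;
}

int
main(void)
{
        struct sketch_tbr tbr = {
                .token = SKETCH_TBR_SCALE(1514),
                .depth = SKETCH_TBR_SCALE(1514),
                .rate  = SKETCH_TBR_SCALE(1) / 8,       /* 1 byte per 8 ticks */
                .last  = 0,
        };
        uint64_t now;

        for (now = 0; now < 25000; now += 5000) {
                printf("tick %llu: dequeue %s\n", (unsigned long long)now,
                    sketch_tbr_dequeue_ok(&tbr, now, 1514) ? "ok" : "blocked");
        }
        return 0;
}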
/*
 * priq_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *      from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *      ALTDQ_REMOVE must return the same packet if called immediately
 *      after ALTDQ_POLL.
 */
static struct mbuf *
priq_dequeue(struct ifaltq_subque *ifsq, struct mbuf *mpolled, int op)
{
        struct ifaltq *ifq = ifsq->ifsq_altq;
        struct priq_if *pif = (struct priq_if *)ifq->altq_disc;
        struct priq_class *cl;
        struct mbuf *m;
        int pri;

        if (ifsq_get_index(ifsq) != PRIQ_SUBQ_INDEX) {
                /*
                 * Race happened, the unrelated subqueue was
                 * picked during the packet scheduler transition.
                 */
                ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
                return NULL;
        }

        if (ifsq_is_empty(ifsq)) {
                /* no packet in the queue */
                KKASSERT(mpolled == NULL);
                return (NULL);
        }

        crit_enter();
        m = NULL;
        for (pri = pif->pif_maxpri; pri >= 0; pri--) {
                if ((cl = pif->pif_classes[pri]) != NULL && !qempty(cl->cl_q)) {
                        if (op == ALTDQ_POLL) {
                                m = priq_pollq(cl);
                                break;
                        }

                        m = priq_getq(cl);
                        if (m != NULL) {
                                ifsq->ifq_len--;
                                if (qempty(cl->cl_q))
                                        cl->cl_period++;
                                PKTCNTR_ADD(&cl->cl_xmitcnt, m_pktlen(m));
                        }
                        break;
                }
        }
        crit_exit();
        KKASSERT(mpolled == NULL || mpolled == m);
        return (m);
}
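/*
 * Standalone sketch (not ALTQ code) of the ALTDQ_POLL/ALTDQ_REMOVE contract
 * documented above: POLL returns the head packet without unlinking it, and a
 * REMOVE issued immediately afterwards must hand back that same packet, so a
 * driver can size a transmit slot before committing to the dequeue.  The
 * queue type, the operation names, and the helpers below are illustrative
 * assumptions, not the kernel interfaces.
 */
#include <assert.h>
#include <stddef.h>

struct sketch_pkt {
        struct sketch_pkt *next;
        int                len;
};

struct sketch_queue {
        struct sketch_pkt *head;
        struct sketch_pkt *tail;
};

enum { SKETCH_DQ_POLL, SKETCH_DQ_REMOVE };

static struct sketch_pkt *
sketch_dequeue(struct sketch_queue *q, int op)
{
        struct sketch_pkt *p = q->head;

        if (p == NULL || op == SKETCH_DQ_POLL)
                return p;               /* POLL: leave the packet linked */
        q->head = p->next;              /* REMOVE: unlink the head packet */
        if (q->head == NULL)
                q->tail = NULL;
        return p;
}

int
main(void)
{
        struct sketch_pkt a = { NULL, 60 }, b = { NULL, 1500 };
        struct sketch_queue q = { &a, &b };
        struct sketch_pkt *polled, *removed;

        a.next = &b;

        /* A driver typically polls first to check the packet fits... */
        polled = sketch_dequeue(&q, SKETCH_DQ_POLL);
        /* ...then removes, and must see the very same packet. */
        removed = sketch_dequeue(&q, SKETCH_DQ_REMOVE);
        assert(polled == removed && removed == &a);
        return 0;
}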
static int
cbq_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
        struct ifaltq *ifq = ifsq->ifsq_altq;
        cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;

        crit_enter();
        switch (req) {
        case ALTRQ_PURGE:
                if (ifsq_get_index(ifsq) == CBQ_SUBQ_INDEX) {
                        cbq_purge(cbqp);
                } else {
                        /*
                         * Race happened, the unrelated subqueue was
                         * picked during the packet scheduler transition.
                         */
                        ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
                }
                break;
        }
        crit_exit();
        return (0);
}