Example #1
int
codel_add_altq(struct ifnet *ifp, struct pf_altq *a)
{
	struct codel_if	*cif;
	struct codel_opts	*opts;

	if (ifp == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	opts = &a->pq_u.codel_opts;

	cif = malloc(sizeof(struct codel_if), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cif == NULL)
		return (ENOMEM);
	cif->cif_bandwidth = a->ifbandwidth;
	cif->cif_ifq = &ifp->if_snd;

	cif->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cif->cl_q == NULL) {
		free(cif, M_DEVBUF);
		return (ENOMEM);
	}

	if (a->qlimit == 0)
		a->qlimit = 50;	/* use default. */
	qlimit(cif->cl_q) = a->qlimit;
	qtype(cif->cl_q) = Q_CODEL;
	qlen(cif->cl_q) = 0;
	qsize(cif->cl_q) = 0;

	if (opts->target == 0)
		opts->target = 5;
	if (opts->interval == 0)
		opts->interval = 100;
	cif->codel.params.target = machclk_freq * opts->target / 1000;
	cif->codel.params.interval = machclk_freq * opts->interval / 1000;
	cif->codel.params.ecn = opts->ecn;
	cif->codel.stats.maxpacket = 256;

	cif->cl_stats.qlength = qlen(cif->cl_q);
	cif->cl_stats.qlimit = qlimit(cif->cl_q);

	/* keep the state in pf_altq */
	a->altq_disc = cif;

	return (0);
}
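
The target and interval options are supplied in milliseconds and converted to machine-clock ticks before being stored in the CoDel parameters. A minimal sketch of that conversion (the helper name is hypothetical; machclk_freq is the clock rate in ticks per second):

#include <stdint.h>

/* milliseconds -> machine-clock ticks, as in codel_add_altq() above */
static inline uint64_t
ms_to_machclk(uint64_t ms, uint64_t machclk_freq)
{
	return (machclk_freq * ms / 1000);
}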
Example #2
static int
priq_addq(struct priq_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q, m,
				cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & PRCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}
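
The return value encodes ownership of the mbuf: 0 means it was enqueued, -1 means it was dropped and already freed, so the caller must only account for the loss. A sketch of a call-site fragment under that convention (counter macro and variable names follow ALTQ usage but are illustrative here):

	len = m_pktlen(m);
	if (priq_addq(cl, m) != 0) {
		/* drop occurred; the mbuf was freed inside priq_addq() */
		PKTCNTR_ADD(&cl->cl_dropcnt, len);
		return (ENOBUFS);
	}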
Example #3
/* copy the stats info in rm_class to class_stats_t */
static void
get_class_stats(class_stats_t *statsp, struct rm_class *cl)
{
	statsp->xmit_cnt	= cl->stats_.xmit_cnt;
	statsp->drop_cnt	= cl->stats_.drop_cnt;
	statsp->over		= cl->stats_.over;
	statsp->borrows		= cl->stats_.borrows;
	statsp->overactions	= cl->stats_.overactions;
	statsp->delays		= cl->stats_.delays;

	statsp->depth		= cl->depth_;
	statsp->priority	= cl->pri_;
	statsp->maxidle		= cl->maxidle_;
	statsp->minidle		= cl->minidle_;
	statsp->offtime		= cl->offtime_;
	statsp->qmax		= qlimit(cl->q_);
	statsp->ns_per_byte	= cl->ns_per_byte_;
	statsp->wrr_allot	= cl->w_allotment_;
	statsp->qcnt		= qlen(cl->q_);
	statsp->avgidle		= cl->avgidle_;

	statsp->qtype		= qtype(cl->q_);
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		red_getstats(cl->red_, &statsp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
#endif
}
Example #4
int
codel_addq(struct codel *c, class_queue_t *q, struct mbuf *m)
{
	struct m_tag *mtag;
	uint64_t *enqueue_time;

	if (qlen(q) < qlimit(q)) {
		mtag = m_tag_locate(m, MTAG_CODEL, 0, NULL);
		if (mtag == NULL)
			mtag = m_tag_alloc(MTAG_CODEL, 0, sizeof(uint64_t),
			    M_NOWAIT);
		if (mtag == NULL) {
			m_freem(m);
			return (-1);
		}
		enqueue_time = (uint64_t *)(mtag + 1);
		*enqueue_time = read_machclk();
		m_tag_prepend(m, mtag);
		_addq(q, m);
		return (0);
	}
	c->drop_overlimit++;
	m_freem(m);

	return (-1);
}
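
The timestamp prepended here is what CoDel consults at dequeue time: the packet's sojourn time is the current machine clock minus the stored enqueue time. A sketch under that assumption (illustrative helper, not the kernel's actual dequeue path):

static uint64_t
codel_sojourn(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_locate(m, MTAG_CODEL, 0, NULL);

	if (mtag == NULL)
		return (0);	/* untagged packet: no delay sample */
	return (read_machclk() - *(uint64_t *)(mtag + 1));
}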
Example #5
int
rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
    int minidle, u_int offtime, int pktsize)
{
	struct rm_ifdat	*ifd;
	u_int		 old_allotment;
	int		 s;

	ifd = cl->ifdat_;
	old_allotment = cl->allotment_;

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(ifd->ifq_);
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
		rmc_wrr_set_weights(ifd);
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
	return (0);
}
Example #6
static void
get_class_stats(struct priq_classstats *sp, struct priq_class *cl)
{
	sp->class_handle = cl->cl_handle;
	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->period = cl->cl_period;
	sp->xmitcnt = cl->cl_xmitcnt;
	sp->dropcnt = cl->cl_dropcnt;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}
Example #7
int
priq_get_class_stats(struct priq_if *pif, u_int32_t qid,
    struct priq_classstats *sp)
{
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	if ((cl = priq_clh_to_clp(pif, qid)) == NULL)
		return (EINVAL);

	sp->class_handle = cl->cl_handle;
	sp->priority = cl->cl_pri;
	sp->qlength = qlen(&cl->cl_q);
	sp->qlimit = qlimit(&cl->cl_q);
	sp->period = cl->cl_period;
	sp->xmitcnt = cl->cl_xmitcnt;
	sp->dropcnt = cl->cl_dropcnt;

	sp->qtype = qtype(&cl->cl_q);
	sp->qstate = qstate(&cl->cl_q);
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_getstats(cl->cl_rio, &sp->red[0]);
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_getstats(cl->cl_blue, &sp->blue);
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_getstats(cl->cl_sfb, &sp->sfb);

	return (0);
}
Example #8
static inline int
fairq_addq(struct fairq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
	struct ifclassq *ifq = cl->cl_fif->fif_ifq;
	fairq_bucket_t *b;
	u_int32_t hash = m->m_pkthdr.pkt_flowid;
	u_int32_t hindex;
	u_int64_t bw;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	/*
	 * If the packet doesn't have any keep-state, put it at the end of
	 * our queue.  XXX this can result in out-of-order delivery.
	 */
	if (hash == 0) {
		if (cl->cl_head)
			b = cl->cl_head->prev;
		else
			b = &cl->cl_buckets[0];
	} else {
		hindex = (hash & cl->cl_nbucket_mask);
		b = &cl->cl_buckets[hindex];
	}

	/*
	 * Add the bucket to the end of the circular list of active buckets.
	 *
	 * As a special case we add the bucket to the beginning of the list
	 * instead of the end if it was not previously on the list and if
	 * its traffic is less than the hog level.
	 */
	if (b->in_use == 0) {
		b->in_use = 1;
		if (cl->cl_head == NULL) {
			cl->cl_head = b;
			b->next = b;
			b->prev = b;
		} else {
			b->next = cl->cl_head;
			b->prev = cl->cl_head->prev;
			b->prev->next = b;
			b->next->prev = b;

			if (b->bw_delta && cl->cl_hogs_m1) {
				bw = b->bw_bytes * machclk_freq / b->bw_delta;
				if (bw < cl->cl_hogs_m1)
					cl->cl_head = b;
			}
		}
	}

#if CLASSQ_RIO
	if (cl->cl_qtype == Q_RIO)
		return (rio_addq(cl->cl_rio, &b->queue, m, t));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (cl->cl_qtype == Q_RED)
		return (red_addq(cl->cl_red, &b->queue, m, t));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (cl->cl_qtype == Q_BLUE)
		return (blue_addq(cl->cl_blue, &b->queue, m, t));
	else
#endif /* CLASSQ_BLUE */
	if (cl->cl_qtype == Q_SFB) {
		if (cl->cl_sfb == NULL) {
			struct ifnet *ifp = FAIRQIF_IFP(cl->cl_fif);

			VERIFY(cl->cl_flags & FARF_LAZY);
			IFCQ_CONVERT_LOCK(ifq);

			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    cl->cl_qlimit, cl->cl_qflags);
			if (cl->cl_sfb == NULL) {
				/* fall back to droptail */
				cl->cl_qtype = Q_DROPTAIL;
				cl->cl_flags &= ~FARF_SFB;
				cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

				log(LOG_ERR, "%s: %s SFB lazy allocation "
				    "failed for qid=%d pri=%d, falling back "
				    "to DROPTAIL\n", if_name(ifp),
				    fairq_style(cl->cl_fif), cl->cl_handle,
				    cl->cl_pri);
			}
		}
		if (cl->cl_sfb != NULL)
			return (sfb_addq(cl->cl_sfb, &b->queue, m, t));
	} else if (qlen(&b->queue) >= qlimit(&b->queue)) {
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (CLASSQEQ_DROPPED);
	}

#if PF_ECN
	if (cl->cl_flags & FARF_CLEARDSCP)
		write_dsfield(m, t, 0);
#endif /* PF_ECN */

	_addq(&b->queue, m);

	return (0);
}
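
The hog check above estimates a bucket's recent throughput from its byte and tick counters; pulled out as a standalone helper (hypothetical, for illustration):

#include <stdint.h>

/* bytes/sec = bytes accumulated over delta machine-clock ticks,
 * scaled by the clock rate in ticks per second; buckets below the
 * class hog level re-enter at the head of the ring so light flows
 * are served before established hogs */
static uint64_t
bucket_rate(uint64_t bw_bytes, uint64_t bw_delta, uint64_t machclk_freq)
{
	if (bw_delta == 0)
		return (0);
	return (bw_bytes * machclk_freq / bw_delta);
}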
Example #9
static struct priq_class *
priq_class_create(struct priq_if *pif, int pri, int qlimit, int flags, int qid)
{
	struct priq_class *cl;
	int s;

#ifndef ALTQ_RED
	if (flags & PRCF_RED) {
#ifdef ALTQ_DEBUG
		printf("priq_class_create: RED not configured for PRIQ!\n");
#endif
		return (NULL);
	}
#endif

	if ((cl = pif->pif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		s = splnet();
		if (!qempty(cl->cl_q))
			priq_purgeq(cl);
		splx(s);
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	} else {
		cl = malloc(sizeof(struct priq_class), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl == NULL)
			return (NULL);

		cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_q == NULL)
			goto err_ret;
	}

	pif->pif_classes[pri] = cl;
	if (flags & PRCF_DEFAULTCLASS)
		pif->pif_default = cl;
	if (qlimit == 0)
		qlimit = 50;  /* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > pif->pif_maxpri)
		pif->pif_maxpri = pri;
	cl->cl_pif = pif;
	cl->cl_handle = qid;

#ifdef ALTQ_RED
	if (flags & (PRCF_RED|PRCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & PRCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & PRCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (pif->pif_bandwidth < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)pif->pif_ifq->altq_ifp->if_mtu
			  * 1000 * 1000 * 1000 / (pif->pif_bandwidth / 8);
#ifdef ALTQ_RIO
		if (flags & PRCF_RIO) {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
						red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		} else
#endif
		if (flags & PRCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
	}
#endif /* ALTQ_RED */

	return (cl);

 err_ret:
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}
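
When RED is attached, the drop thresholds are derived from the queue limit at the red_alloc() call: th_min is 10% and th_max is 30% of qlimit. A worked instance with the default limit:

/*
 * With the default qlimit of 50 packets:
 *   th_min = 50 * 10 / 100 = 5  packets
 *   th_max = 50 * 30 / 100 = 15 packets
 * red_pkttime is the time to send one MTU-sized packet, or 1 sec
 * when the configured bandwidth is below 8 bits/sec (effectively unset).
 */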
Example #10
/*
 * rm_class_t *
 * rmc_newclass(...) - Create a new resource management class at priority
 * 'pri' on the interface given by 'ifd'.
 *
 * nsecPerByte  is the data rate of the interface in nanoseconds/byte.
 *              E.g., 800 for a 10Mb/s ethernet.  If the class gets less
 *              than 100% of the bandwidth, this number should be the
 *              'effective' rate for the class.  Let f be the
 *              bandwidth fraction allocated to this class, and let
 *              nsPerByte be the data rate of the output link in
 *              nanoseconds/byte.  Then nsecPerByte is set to
 *              nsPerByte / f.  E.g., 1600 (= 800 / .5)
 *              for a class that gets 50% of an ethernet's bandwidth.
 *
 * action       the routine to call when the class is over limit.
 *
 * maxq         max allowable queue size for class (in packets).
 *
 * parent       parent class pointer.
 *
 * borrow       class to borrow from (should be either 'parent' or null).
 *
 * maxidle      max value allowed for class 'idle' time estimate (this
 *              parameter determines how large an initial burst of packets
 *              can be before overlimit action is invoked.)
 *
 * offtime      how long 'delay' action will delay when class goes over
 *              limit (this parameter determines the steady-state burst
 *              size when a class is running over its limit).
 *
 * Maxidle and offtime have to be computed from the following:  If the
 * average packet size is s, the bandwidth fraction allocated to this
 * class is f, we want to allow b packet bursts, and the gain of the
 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
 *
 *   ptime = s * nsPerByte * (1 - f) / f
 *   maxidle = ptime * (1 - g^b) / g^b
 *   minidle = -ptime * (1 / (f - 1))
 *   offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
 *
 * Operationally, it's convenient to specify maxidle & offtime in units
 * independent of the link bandwidth so the maxidle & offtime passed to
 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
 * (The constant factor is a scale factor needed to make the parameters
 * integers.  This scaling also means that the 'unscaled' values of
 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
 * maxidle also must be scaled upward by this value.  Thus, the passed
 * values for maxidle and offtime can be computed as follows:
 *
 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
 * offtime = offtime * 8 / (1000 * nsecPerByte)
 *
 * When USE_HRTIME is employed, maxidle and offtime become:
 * 	maxidle = maxidle * (8.0 / nsecPerByte);
 * 	offtime = offtime * (8.0 / nsecPerByte);
 */
struct rm_class *
rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
	     void (*action)(rm_class_t *, rm_class_t *), int maxq,
	     struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
	     int minidle, u_int offtime, int pktsize, int flags)
{
	struct rm_class *cl;
	struct rm_class *peer;

	if (pri >= RM_MAXPRIO)
		return (NULL);
#ifndef ALTQ_RED
	if (flags & RMCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("rmc_newclass: RED not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_RIO
	if (flags & RMCF_RIO) {
#ifdef ALTQ_DEBUG
		kprintf("rmc_newclass: RIO not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif

	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	callout_init(&cl->callout_);
	cl->q_ = kmalloc(sizeof(*cl->q_), M_ALTQ, M_WAITOK | M_ZERO);

	/*
	 * Class initialization.
	 */
	cl->children_ = NULL;
	cl->parent_ = parent;
	cl->borrow_ = borrow;
	cl->leaf_ = 1;
	cl->ifdat_ = ifd;
	cl->pri_ = pri;
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->depth_ = 0;
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;
	qtype(cl->q_) = Q_DROPHEAD;
	qlen(cl->q_) = 0;
	cl->flags_ = flags;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif
	cl->overlimit = action;

#ifdef ALTQ_RED
	if (flags & (RMCF_RED|RMCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & RMCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & RMCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		red_pkttime = nsecPerByte * pktsize  / 1000;

		if (flags & RMCF_RED) {
			cl->red_ = red_alloc(0, 0,
			    qlimit(cl->q_) * 10/100,
			    qlimit(cl->q_) * 30/100,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->red_ = (red_t *)rio_alloc(0, NULL,
						      red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	/*
	 * put the class into the class tree
	 */
	crit_enter();
	if ((peer = ifd->active_[pri]) != NULL) {
		/* find the last class at this pri */
		cl->peer_ = peer;
		while (peer->peer_ != ifd->active_[pri])
			peer = peer->peer_;
		peer->peer_ = cl;
	} else {
		ifd->active_[pri] = cl;
		cl->peer_ = cl;
	}

	if (cl->parent_) {
		cl->next_ = parent->children_;
		parent->children_ = cl;
		parent->leaf_ = 0;
	}

	/*
	 * Compute the depth of this class and its ancestors in the class
	 * hierarchy.
	 */
	rmc_depth_compute(cl);

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->num_[pri]++;
		ifd->alloc_[pri] += cl->allotment_;
		rmc_wrr_set_weights(ifd);
	}
	crit_exit();
	return (cl);
}
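
Using the figures from the comment block (800 ns/byte for 10 Mb/s ethernet), a self-contained worked instance of the allotment arithmetic:

#include <stdio.h>

#define RM_NS_PER_SEC	1000000000	/* value as used by ALTQ */

int
main(void)
{
	/* a class granted 50% of a 10 Mb/s link: 800 / 0.5 = 1600 ns/byte */
	unsigned int nsecPerByte = 1600;
	unsigned int allotment = RM_NS_PER_SEC / nsecPerByte;

	printf("allotment = %u bytes/sec\n", allotment);	/* 625000, i.e. 5 Mb/s */
	return (0);
}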
Example #11
struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	MALLOC(cl, struct hfsc_class *, sizeof(struct hfsc_class),
	       M_DEVBUF, M_WAITOK);
	if (cl == NULL)
		return (NULL);
	bzero(cl, sizeof(struct hfsc_class));

	MALLOC(cl->cl_q, class_queue_t *, sizeof(class_queue_t),
	       M_DEVBUF, M_WAITOK);
	if (cl->cl_q == NULL)
		goto err_ret;
	bzero(cl->cl_q, sizeof(class_queue_t));

	cl->cl_actc = actlist_alloc();
	if (cl->cl_actc == NULL)
		goto err_ret;

	if (qlimit == 0)
		qlimit = 50;  /* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		MALLOC(cl->cl_rsc, struct internal_sc *,
		    sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
Example #12
static struct qfq_class *
qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit,
    u_int32_t flags, u_int32_t maxsz, u_int32_t qid, classq_pkt_type_t ptype)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct qfq_group *grp;
	struct qfq_class *cl;
	u_int32_t w;			/* approximated weight */
	int i;

	IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq);

	if (qif->qif_classes >= qif->qif_maxclasses) {
		log(LOG_ERR, "%s: %s out of classes! (max %d)\n",
		    if_name(QFQIF_IFP(qif)), qfq_style(qif),
		    qif->qif_maxclasses);
		return (NULL);
	}

	ifq = qif->qif_ifq;
	ifp = QFQIF_IFP(qif);

	cl = zalloc(qfq_cl_zone);
	if (cl == NULL)
		return (NULL);

	bzero(cl, qfq_cl_size);

	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;  /* use default */
	}
	_qinit(&cl->cl_q, Q_DROPTAIL, qlimit, ptype);
	cl->cl_qif = qif;
	cl->cl_flags = flags;
	cl->cl_handle = qid;

	/*
	 * Find a free slot in the class table.  If the slot matching
	 * the lower bits of qid is free, use this slot.  Otherwise,
	 * use the first free slot.
	 */
	i = qid % qif->qif_maxclasses;
	if (qif->qif_class_tbl[i] == NULL) {
		qif->qif_class_tbl[i] = cl;
	} else {
		for (i = 0; i < qif->qif_maxclasses; i++) {
			if (qif->qif_class_tbl[i] == NULL) {
				qif->qif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == qif->qif_maxclasses) {
			zfree(qfq_cl_zone, cl);
			return (NULL);
		}
	}

	w = weight;
	VERIFY(w > 0 && w <= QFQ_MAX_WEIGHT);
	cl->cl_lmax = maxsz;
	cl->cl_inv_w = (QFQ_ONE_FP / w);
	w = (QFQ_ONE_FP / cl->cl_inv_w);
	VERIFY(qif->qif_wsum + w <= QFQ_MAX_WSUM);

	i = qfq_calc_index(cl, cl->cl_inv_w, cl->cl_lmax);
	VERIFY(i <= QFQ_MAX_INDEX);
	grp = qif->qif_groups[i];
	if (grp == NULL) {
		grp = _MALLOC(sizeof (*grp), M_DEVBUF, M_WAITOK|M_ZERO);
		if (grp != NULL) {
			grp->qfg_index = i;
			grp->qfg_slot_shift =
			    QFQ_MTU_SHIFT + QFQ_FRAC_BITS - (QFQ_MAX_INDEX - i);
			grp->qfg_slots = _MALLOC(sizeof (struct qfq_class *) *
			    qif->qif_maxslots, M_DEVBUF, M_WAITOK|M_ZERO);
			if (grp->qfg_slots == NULL) {
				log(LOG_ERR, "%s: %s unable to allocate group "
				    "slots for index %d\n", if_name(ifp),
				    qfq_style(qif), i);
			}
		} else {
			log(LOG_ERR, "%s: %s unable to allocate group for "
			    "qid=%d\n", if_name(ifp), qfq_style(qif),
			    cl->cl_handle);
		}
		if (grp == NULL || grp->qfg_slots == NULL) {
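			/*
			 * Caution (annotation, not in the original source):
			 * if the class was stored in a first-free slot above
			 * because the qid-keyed slot was occupied, this
			 * clears the qid-keyed slot rather than the slot
			 * that actually holds cl.
			 */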
			qif->qif_class_tbl[qid % qif->qif_maxclasses] = NULL;
			if (grp != NULL)
				_FREE(grp, M_DEVBUF);
			zfree(qfq_cl_zone, cl);
			return (NULL);
		} else {
			qif->qif_groups[i] = grp;
		}
	}
	cl->cl_grp = grp;
	qif->qif_wsum += w;
	/* XXX cl->cl_S = qif->qif_V; ? */
	/* XXX compute qif->qif_i_wsum */

	qif->qif_classes++;

	if (flags & QFCF_DEFAULTCLASS)
		qif->qif_default = cl;

	if (flags & QFCF_SFB) {
		cl->cl_qflags = 0;
		if (flags & QFCF_ECN) {
			cl->cl_qflags |= SFBF_ECN;
		}
		if (flags & QFCF_FLOWCTL) {
			cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & QFCF_DELAYBASED) {
			cl->cl_qflags |= SFBF_DELAYBASED;
		}
		if (!(cl->cl_flags & QFCF_LAZY))
			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    qlimit(&cl->cl_q), cl->cl_qflags);
		if (cl->cl_sfb != NULL || (cl->cl_flags & QFCF_LAZY))
			qtype(&cl->cl_q) = Q_SFB;
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d grp=%d weight=%d "
		    "qlimit=%d flags=%b\n", if_name(ifp), qfq_style(qif),
		    cl->cl_handle, cl->cl_grp->qfg_index, weight, qlimit,
		    flags, QFCF_BITS);
	}

	return (cl);
}
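
Because the class stores the inverse weight in fixed point, the effective weight is the requested value rounded to one exactly representable as QFQ_ONE_FP / cl_inv_w, and that rounded w is what enters qif_wsum. A self-contained sketch of the rounding (the QFQ_FRAC_BITS value is assumed here for illustration):

#include <stdint.h>

#define QFQ_FRAC_BITS	30			/* assumed fixed-point width */
#define QFQ_ONE_FP	(1UL << QFQ_FRAC_BITS)

/* requested weight -> weight the scheduler effectively uses */
static inline uint32_t
qfq_effective_weight(uint32_t w)
{
	uint32_t inv_w = (uint32_t)(QFQ_ONE_FP / w);

	return ((uint32_t)(QFQ_ONE_FP / inv_w));
}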
Example #13
static inline int
priq_addq(struct priq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
	struct priq_if *pif = cl->cl_pif;
	struct ifclassq *ifq = pif->pif_ifq;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_addq(cl->cl_rio, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_addq(cl->cl_red, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_addq(cl->cl_blue, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q)) {
		if (cl->cl_sfb == NULL) {
			struct ifnet *ifp = PRIQIF_IFP(pif);

			VERIFY(cl->cl_flags & PRCF_LAZY);
			cl->cl_flags &= ~PRCF_LAZY;
			IFCQ_CONVERT_LOCK(ifq);

			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb == NULL) {
				/* fall back to droptail */
				qtype(&cl->cl_q) = Q_DROPTAIL;
				cl->cl_flags &= ~PRCF_SFB;
				cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

				log(LOG_ERR, "%s: %s SFB lazy allocation "
				    "failed for qid=%d pri=%d, falling back "
				    "to DROPTAIL\n", if_name(ifp),
				    priq_style(pif), cl->cl_handle,
				    cl->cl_pri);
			} else if (pif->pif_throttle != IFNET_THROTTLE_OFF) {
				/* if there's pending throttling, set it */
				cqrq_throttle_t tr = { 1, pif->pif_throttle };
				int err = priq_throttle(pif, &tr);

				if (err == EALREADY)
					err = 0;
				if (err != 0) {
					tr.level = IFNET_THROTTLE_OFF;
					(void) priq_throttle(pif, &tr);
				}
			}
		}
		if (cl->cl_sfb != NULL)
			return (sfb_addq(cl->cl_sfb, &cl->cl_q, m, t));
	} else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) {
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (CLASSQEQ_DROPPED);
	}

#if PF_ECN
	if (cl->cl_flags & PRCF_CLEARDSCP)
		write_dsfield(m, t, 0);
#endif /* PF_ECN */

	_addq(&cl->cl_q, m);

	return (0);
}
Example #14
static struct priq_class *
priq_class_create(struct priq_if *pif, int pri, u_int32_t qlimit,
    int flags, u_int32_t qid)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	/* Sanitize flags unless internally configured */
	if (pif->pif_flags & PRIQIFF_ALTQ)
		flags &= PRCF_USERFLAGS;

#if !CLASSQ_RED
	if (flags & PRCF_RED) {
		log(LOG_ERR, "%s: %s RED not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
	if (flags & PRCF_RIO) {
		log(LOG_ERR, "%s: %s RIO not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_RIO */

#if !CLASSQ_BLUE
	if (flags & PRCF_BLUE) {
		log(LOG_ERR, "%s: %s BLUE not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_BLUE */

	/* These are mutually exclusive */
	if ((flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_RED &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_RIO &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_BLUE &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_SFB) {
		log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}

	ifq = pif->pif_ifq;
	ifp = PRIQIF_IFP(pif);

	if ((cl = pif->pif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		if (!qempty(&cl->cl_q))
			priq_purgeq(pif, cl, 0, NULL, NULL);
#if CLASSQ_RIO
		if (q_is_rio(&cl->cl_q))
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (q_is_red(&cl->cl_q))
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (q_is_blue(&cl->cl_q))
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	} else {
		cl = zalloc(priq_cl_zone);
		if (cl == NULL)
			return (NULL);

		bzero(cl, priq_cl_size);
	}

	pif->pif_classes[pri] = cl;
	if (flags & PRCF_DEFAULTCLASS)
		pif->pif_default = cl;
	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;  /* use default */
	}
	_qinit(&cl->cl_q, Q_DROPTAIL, qlimit);
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > pif->pif_maxpri)
		pif->pif_maxpri = pri;
	cl->cl_pif = pif;
	cl->cl_handle = qid;

	if (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
		u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
		int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

		cl->cl_qflags = 0;
		if (flags & PRCF_ECN) {
			if (flags & PRCF_BLUE)
				cl->cl_qflags |= BLUEF_ECN;
			else if (flags & PRCF_SFB)
				cl->cl_qflags |= SFBF_ECN;
			else if (flags & PRCF_RED)
				cl->cl_qflags |= REDF_ECN;
			else if (flags & PRCF_RIO)
				cl->cl_qflags |= RIOF_ECN;
		}
		if (flags & PRCF_FLOWCTL) {
			if (flags & PRCF_SFB)
				cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & PRCF_CLEARDSCP) {
			if (flags & PRCF_RIO)
				cl->cl_qflags |= RIOF_CLEARDSCP;
		}
#if CLASSQ_RED || CLASSQ_RIO
		/*
		 * XXX: RED & RIO should be watching link speed and MTU
		 *	events and recompute pkttime accordingly.
		 */
		if (ifbandwidth < 8)
			pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
			    (ifbandwidth / 8);

		/* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RED
		if (flags & PRCF_RED) {
			cl->cl_red = red_alloc(ifp, 0, 0,
			    qlimit(&cl->cl_q) * 10/100,
			    qlimit(&cl->cl_q) * 30/100,
			    cl->cl_qflags, pkttime);
			if (cl->cl_red != NULL)
				qtype(&cl->cl_q) = Q_RED;
		}
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
		if (flags & PRCF_RIO) {
			cl->cl_rio =
			    rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
			if (cl->cl_rio != NULL)
				qtype(&cl->cl_q) = Q_RIO;
		}
#endif /* CLASSQ_RIO */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
		if (flags & PRCF_BLUE) {
			cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
			if (cl->cl_blue != NULL)
				qtype(&cl->cl_q) = Q_BLUE;
		}
#endif /* CLASSQ_BLUE */
		if (flags & PRCF_SFB) {
			if (!(cl->cl_flags & PRCF_LAZY))
				cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
				    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb != NULL || (cl->cl_flags & PRCF_LAZY))
				qtype(&cl->cl_q) = Q_SFB;
		}
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
		    "flags=%b\n", if_name(ifp), priq_style(pif),
		    cl->cl_handle, cl->cl_pri, qlimit, flags, PRCF_BITS);
	}

	return (cl);
}
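
The pkttime fed to RED/RIO above is the time, in nanoseconds, to transmit one MTU-sized packet at the link rate, with a one-second fallback when the rate is below 8 bits/sec. As a standalone helper (hypothetical name, mirroring the arithmetic above):

#include <stdint.h>

static int64_t
red_pkttime_ns(uint64_t mtu, uint64_t bits_per_sec)
{
	if (bits_per_sec < 8)
		return (1000LL * 1000 * 1000);	/* 1 sec */
	return ((int64_t)(mtu * 1000 * 1000 * 1000 / (bits_per_sec / 8)));
}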