Code example #1
File: sch_tbf.c Project: avagin/linux
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

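		/* A bitwise OR of two s64 values is negative iff at least
		 * one of them is negative, so this single branch checks
		 * that both toks and ptoks are >= 0. */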
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
	   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}
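tbf_dequeue() only releases a packet once both the rate bucket (toks) and the optional peak bucket (ptoks) hold enough credit; tokens are measured in nanoseconds of transmission time, so a refill is just "add the elapsed time, capped at the configured burst". Below is a minimal user-space sketch of the same accounting, assuming a fixed bytes-per-second rate; struct bucket, tb_try_send() and the local l2t_ns() are illustrative names, not kernel API:

#include <stdint.h>
#include <stdbool.h>
#include <time.h>

/* Token-bucket sketch: tokens are "nanoseconds of credit".
 * buffer_ns caps the burst; l2t_ns() is the time needed to send
 * len bytes at the configured rate. */
struct bucket {
	int64_t tokens;     /* current credit, ns */
	int64_t buffer_ns;  /* maximum credit (burst), ns */
	int64_t last;       /* time of last update, ns */
	uint64_t rate_bps;  /* bytes per second */
};

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static int64_t l2t_ns(const struct bucket *b, uint32_t len)
{
	return (int64_t)len * 1000000000LL / (int64_t)b->rate_bps;
}

/* Mirror of the kernel pattern: accumulate elapsed time, cap it at
 * the burst buffer, subtract the cost of this packet, send iff >= 0. */
static bool tb_try_send(struct bucket *b, uint32_t len)
{
	int64_t now = now_ns();
	int64_t toks = now - b->last;

	if (toks > b->buffer_ns)
		toks = b->buffer_ns;
	toks += b->tokens;
	if (toks > b->buffer_ns)
		toks = b->buffer_ns;
	toks -= l2t_ns(b, len);

	if (toks >= 0) {
		b->last = now;
		b->tokens = toks;
		return true;
	}
	return false;	/* caller may retry in -toks ns */
}

A caller that gets false can sleep for -toks nanoseconds before retrying, which is exactly what the qdisc_watchdog_schedule_ns() call above arranges.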
Code example #2
File: sch_choke.c Project: AlexShiLucky/linux
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
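The step that makes this CHOKe rather than plain RED is choke_match_random(): draw one queued packet at random and, if it belongs to the same flow as the arriving packet, drop both (the victim via choke_drop_by_idx(), the newcomer via congestion_drop). A simplified, hypothetical sketch of that test over a ring buffer, using rand() and a plain flow_id in place of the kernel's PRNG and skb flow keys:

#include <stdbool.h>
#include <stdlib.h>

struct pkt {
	unsigned int flow_id;	/* stand-in for the real flow key */
};

struct choke_queue {
	struct pkt **tab;	/* ring buffer; size is a power of two */
	unsigned int tab_mask;	/* size - 1 */
	unsigned int head, tail;
};

/* Peek one queued packet at random.  Slots may be NULL because CHOKe
 * leaves holes when it drops by index, hence the bounded retry loop. */
static struct pkt *choke_peek_random(struct choke_queue *q, unsigned int *pidx)
{
	unsigned int qlen = (q->tail - q->head) & q->tab_mask;
	unsigned int retries = 16;

	if (!qlen)
		return NULL;

	while (retries--) {
		unsigned int idx = (q->head + (unsigned int)rand() % qlen) &
				   q->tab_mask;

		if (q->tab[idx]) {
			*pidx = idx;
			return q->tab[idx];
		}
	}
	return NULL;
}

/* CHOKe's key test: does the newcomer share a flow with a random
 * victim?  If yes, the caller drops both packets. */
static bool choke_should_drop_pair(struct choke_queue *q,
				   const struct pkt *incoming,
				   unsigned int *victim_idx)
{
	struct pkt *victim = choke_peek_random(q, victim_idx);

	return victim && victim->flow_id == incoming->flow_id;
}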
Code example #3
File: sch_gred.c Project: avagin/linux
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
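All of the RED-family examples above lean on red_calc_qavg(), an exponentially weighted moving average kept in fixed point: the kernel stores qavg pre-scaled by 2^Wlog, so each update is one shift and one add. GRED additionally feeds red_action() the sum of the averages of lower-priority virtual queues when running in RIO mode (the qavg accumulated in the loop above). A sketch of the EWMA itself, assuming qavg_scaled holds avg << wlog as in the kernel:

#include <stdint.h>

struct red_avg {
	uint32_t wlog;		/* weight W = 2^-wlog, e.g. 9 */
	uint64_t qavg_scaled;	/* average queue length << wlog */
};

/* avg <- avg + W * (backlog - avg), done on the scaled value:
 * (avg << wlog) gains exactly (backlog - avg) per sample. */
static void red_avg_update(struct red_avg *a, uint32_t backlog)
{
	a->qavg_scaled += backlog - (a->qavg_scaled >> a->wlog);
}

static uint32_t red_avg_value(const struct red_avg *a)
{
	return (uint32_t)(a->qavg_scaled >> a->wlog);
}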
Code example #4
File: sch_sfq.c Project: asmalldev/linux
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash, dropped;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch, to_free);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch, to_free);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch, to_free);

		slot_queue_add(slot, skb);
		return NET_XMIT_CN;
	}

enqueue:
	qdisc_qstats_backlog_inc(sch, skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could end up servicing new flows only, and freezing old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	dropped = sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, dropped);
	return NET_XMIT_SUCCESS;
}
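When a flow becomes active, sfq_enqueue() links its slot into a circular singly linked list of active flows and keeps only a tail pointer: the head is always tail->next, and new flows are inserted behind the tail so that existing flows are not starved. The same structure with plain pointers in place of the kernel's sfq_index links (illustrative names):

#include <stddef.h>

struct flow {
	struct flow *next;
	int id;
};

/* Insert f as the new tail of the circular list; *tailp is NULL for
 * an empty list.  Mirrors the slot->next / q->tail logic above. */
static void flow_list_add_tail(struct flow **tailp, struct flow *f)
{
	if (*tailp == NULL) {
		f->next = f;			/* single flow points at itself */
	} else {
		f->next = (*tailp)->next;	/* old head stays the head */
		(*tailp)->next = f;
	}
	*tailp = f;	/* new flows go last, old flows keep being served */
}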
Code example #5
File: sch_choke.c Project: lovejavaee/linux-2
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	if (rcu_access_pointer(q->filter_list)) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}
Code example #6
File: sch_sfb.c Project: 020gzh/linux
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
	}

	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

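	/* Flip to the standby set of bins; while double buffering is on,
	 * it is trained in parallel so a rehash does not start cold. */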
	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
			    q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}
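SFB treats its bins as a Bloom-filter-like structure: the 32-bit flow hash is sliced into SFB_LEVELS small indexes, and the flow inherits the minimum queue length and minimum marking probability over all buckets it maps to, so only flows that are hot at every level get penalized. A standalone sketch of that walk; the constants mirror the kernel's 4-bit buckets over a 32-bit hash, the rest is illustrative:

#include <stdint.h>

#define LEVELS		8			/* kernel: SFB_LEVELS */
#define BUCKET_SHIFT	4			/* kernel: SFB_BUCKET_SHIFT */
#define BUCKET_MASK	((1 << BUCKET_SHIFT) - 1)

struct bucket_stat {
	uint32_t qlen;		/* packets mapped here */
	uint32_t p_mark;	/* marking probability */
};

/* One pass over all levels: consume BUCKET_SHIFT bits of the hash per
 * level and track the minima, as the loops in sfb_enqueue() do. */
static void sfb_walk(struct bucket_stat bins[LEVELS][1 << BUCKET_SHIFT],
		     uint32_t sfbhash,
		     uint32_t *min_qlen, uint32_t *p_min)
{
	*min_qlen = ~0U;
	*p_min = ~0U;

	for (int i = 0; i < LEVELS; i++) {
		struct bucket_stat *b = &bins[i][sfbhash & BUCKET_MASK];

		sfbhash >>= BUCKET_SHIFT;
		if (*min_qlen > b->qlen)
			*min_qlen = b->qlen;
		if (*p_min > b->p_mark)
			*p_min = b->p_mark;
	}
}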
Code example #7
File: main.c Project: HKUST-SING/TCN-Software
static struct sk_buff *wfq_dequeue(struct Qdisc *sch)
{
        struct wfq_sched_data *q = qdisc_priv(sch);
        int i, weight;
        struct wfq_class *cl = NULL;
        u64 min_time;
        struct sk_buff *skb = NULL;
        struct sk_buff *next_pkt = NULL;
        unsigned int len;
        s64 bucket_ns = (s64)l2t_ns(&q->rate, wfq_bucket_bytes);
        s64 result, now;
        int prio = prio_schedule(q);

        if (prio < 0)
                return NULL;

        /* Find the active queue with the smallest head finish time */
        for (i = 0; i < wfq_max_queues; i++)
        {
                if (q->queues[i].prio != prio || q->queues[i].len_bytes == 0)
                        continue;

                if (!cl || wfq_time_before(q->queues[i].head_fin_time,
                                           min_time))
                {
                        cl = &q->queues[i];
                        min_time = cl->head_fin_time;
                }
        }

        /* get head packet */
        skb = cl->qdisc->ops->peek(cl->qdisc);
        if (unlikely(!skb))
                return NULL;

        len = skb_size(skb);
        now = ktime_get_ns();
        result = tbf_schedule(len, q, now);

        /* We don't have enough tokens */
        if (result < 0)
        {
                /* For hrtimer absolute mode, we use now + t */
                qdisc_watchdog_schedule_ns(&q->watchdog, now - result, true);
                qdisc_qstats_overlimit(sch);
                return NULL;
        }


        skb = qdisc_dequeue_peeked(cl->qdisc);
        if (unlikely(!skb))
                return NULL;

        q->sum_len_bytes -= len;
        sch->q.qlen--;
        cl->len_bytes -= len;
        q->prio_len_bytes[prio] -= len;

        /* Set the head_fin_time for the remaining head packet */
        if (cl->len_bytes > 0)
        {
                /* Get the current head packet */
                next_pkt = cl->qdisc->ops->peek(cl->qdisc);
                weight = wfq_queue_weight[cl->id];
                if (likely(next_pkt && weight))
                {
                        len = skb_size(next_pkt);
                        cl->head_fin_time += div_u64((u64)len, (u32)weight);
                        if (wfq_time_before(q->virtual_time[prio],
                                            cl->head_fin_time))
                                q->virtual_time[prio] = cl->head_fin_time;
                }
        }

        /* Bucket */
        q->time_ns = now;
        q->tokens = min_t(s64, result, bucket_ns);
        qdisc_unthrottled(sch);
        qdisc_bstats_update(sch, skb);

        /* TCN */
        if (wfq_ecn_scheme == wfq_tcn)
                tcn_marking(skb);
        /* CoDel */
        else if (wfq_ecn_scheme == wfq_codel)
                codel_marking(skb, cl);
        /* dequeue queue length based ECN marking */
        else if (wfq_enable_dequeue_ecn == wfq_enable)
                wfq_qlen_marking(skb, q, cl);

        return skb;
}
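wfq_dequeue() implements the classic WFQ selection rule: among the active queues of the chosen priority, serve the one whose head packet has the smallest virtual finish time, then advance that flow's finish time by len / weight. A minimal sketch of those two steps; struct wfq_flow, wfq_pick() and wfq_advance() are illustrative names, and a plain < stands in for the wraparound-safe wfq_time_before():

#include <stdint.h>
#include <stddef.h>

struct wfq_flow {
	uint64_t head_fin_time;	/* virtual finish time of head packet */
	uint32_t backlog;	/* bytes queued */
	uint32_t weight;
};

/* Pick the backlogged flow with the earliest head finish time, as the
 * selection loop at the top of wfq_dequeue() does. */
static struct wfq_flow *wfq_pick(struct wfq_flow *flows, int n)
{
	struct wfq_flow *best = NULL;

	for (int i = 0; i < n; i++) {
		if (flows[i].backlog == 0)
			continue;
		if (!best || flows[i].head_fin_time < best->head_fin_time)
			best = &flows[i];
	}
	return best;
}

/* After a dequeue, the next head finishes len/weight virtual time
 * units later, mirroring cl->head_fin_time += div_u64(len, weight). */
static void wfq_advance(struct wfq_flow *f, uint32_t len)
{
	if (f->weight)
		f->head_fin_time += len / f->weight;
}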