Example No. 1
/*
 * Enqueue a packet in q, subject to space and queue management policy
 * (whose parameters are in q->fs).
 * Update stats for the queue and the scheduler.
 * Return 0 on success, 1 on drop. The packet is consumed either way.
 */
int
dn_enqueue(struct dn_queue *q, struct mbuf *m, int drop)
{
	struct dn_fs *f;
	struct dn_flow *ni;	/* stats for scheduler instance */
	uint64_t len;

	if (q->fs == NULL || q->_si == NULL) {
		printf("%s fs %p si %p, dropping\n",
			__FUNCTION__, q->fs, q->_si);
		FREE_PKT(m);
		return 1;
	}
	f = &(q->fs->fs);
	ni = &q->_si->ni;
	len = m->m_pkthdr.len;
	/* Update statistics, then check reasons to drop pkt. */
	q->ni.tot_bytes += len;
	q->ni.tot_pkts++;
	ni->tot_bytes += len;
	ni->tot_pkts++;
	if (drop)
		goto drop;
	if (f->plr && random() < f->plr)
		goto drop;
	if ((f->flags & DN_IS_RED) && red_drops(q, m->m_pkthdr.len))
		goto drop;
	if (f->flags & DN_QSIZE_BYTES) {
		if (q->ni.len_bytes > f->qsize)
			goto drop;
	} else if (q->ni.length >= f->qsize) {
		goto drop;
	}
	mq_append(&q->mq, m);
	q->ni.length++;
	q->ni.len_bytes += len;
	ni->length++;
	ni->len_bytes += len;
	return 0;

drop:
	io_pkt_drop++;
	q->ni.drops++;
	ni->drops++;
	FREE_PKT(m);
	return 1;
}
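The plr test above works because dummynet stores the loss rate pre-scaled: a probability p in [0,1] is configured as p * 0x7fffffff, the largest value random() can return, so random() < f->plr drops with probability p. A minimal userspace sketch of that test (the scale_plr helper is illustrative, not part of the source):

#include <stdio.h>
#include <stdlib.h>

/* Scale a loss rate in [0,1] the way dummynet stores plr: as a
 * fraction of 0x7fffffff, the largest value random() returns. */
static long
scale_plr(double p)
{
	return (long)(p * 0x7fffffff);
}

int
main(void)
{
	long plr = scale_plr(0.01);	/* 1% packet loss rate */
	int i, drops = 0;

	srandom(42);
	for (i = 0; i < 1000000; i++)
		if (random() < plr)
			drops++;
	printf("dropped %d of 1000000 (~%.2f%%)\n",
	    drops, 100.0 * drops / 1000000);
	return 0;
}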
Example No. 2
/*
 * Stub if_output routine: any packet handed to the log interface
 * is simply discarded, reporting an error to the caller.
 */
static int
ipfw_log_output(struct ifnet *ifp, struct mbuf *m,
	struct sockaddr *dst, struct route *ro)
{
	if (m != NULL)
		FREE_PKT(m);
	return EINVAL;
}
Example No. 3
/*
 * Stub if_output routine: any packet handed to the bpf interface
 * is simply discarded, reporting success to the caller.
 */
static int
ipfw_bpf_output(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro)
{

	if (m != NULL)
		FREE_PKT(m);
	return (0);
}
Example No. 4
/*
 * Dispose of a list of packets. Use a function so that if we need
 * to do more work, this is a central point to do it.
 */
void
dn_free_pkts(struct mbuf *mnext)
{
	struct mbuf *m;

	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		FREE_PKT(m);
	}
}
Example No. 5
/* Drop a packet from the head of the CoDel queue */
static void
codel_drop_head(struct fq_codel_flow *q, struct fq_codel_si *si)
{
	struct mbuf *m = q->mq.head;

	if (m == NULL)
		return;
	q->mq.head = m->m_nextpkt;

	fq_update_stats(q, si, -m->m_pkthdr.len, 1);

	if (si->main_q.ni.length == 0) /* queue is now idle */
		si->main_q.q_time = dn_cfg.curr_time;

	FREE_PKT(m);
}
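codel_drop_head is a plain head drop on a singly linked mbuf list, with the byte and packet accounting delegated to fq_update_stats. A self-contained sketch of the same operation on a minimal head/tail queue (the struct names here are illustrative, not the source's struct mq):

#include <stdlib.h>

struct pkt {
	struct pkt *next;
	int len;
};

struct pktq {			/* minimal head/tail packet queue */
	struct pkt *head, *tail;
	int len_bytes;
};

/* Unlink and free the head packet, keeping the byte count in sync.
 * If the queue becomes empty, the tail is reset as well. */
static void
pktq_drop_head(struct pktq *q)
{
	struct pkt *p = q->head;

	if (p == NULL)
		return;
	q->head = p->next;
	if (q->head == NULL)
		q->tail = NULL;
	q->len_bytes -= p->len;
	free(p);
}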
Example No. 6
void
netmap_enqueue(struct mbuf *m, int proto)
{
	struct my_netmap_port *peer = m->__m_peer;
	struct txq_entry *x;

	if (peer == NULL) {
		D("error missing peer in %p", m);
		FREE_PKT(m);
		return;	/* packet freed; don't fall through to a NULL peer */
	}
	ND(1, "start with %d packets", peer->cur_txq);
	if (peer->cur_txq >= MY_TXQ_LEN)
		netmap_fwd(peer);
	x = peer->q + peer->cur_txq;
	x->ring_or_mbuf = m;
	x->flags = TXQ_IS_MBUF;
	peer->cur_txq++;
	peer->sess->flags |= WANT_RUN;
	ND("end, queued %d on %s", peer->cur_txq, peer->ifname);
}
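netmap_enqueue stages packets in a fixed-size per-port array and calls netmap_fwd to flush it once MY_TXQ_LEN entries have accumulated, amortizing the sync cost over a batch. A sketch of the same flush-on-full pattern with an illustrative integer payload (names and the batch size are assumptions, not from the source):

#include <stdio.h>

#define BATCH_LEN 8

struct batchq {
	int q[BATCH_LEN];
	int cur;
};

static void
flush(struct batchq *b)
{
	/* stand-in for netmap_fwd(): consume everything staged so far */
	printf("flushing %d entries\n", b->cur);
	b->cur = 0;
}

static void
enqueue(struct batchq *b, int v)
{
	if (b->cur >= BATCH_LEN)	/* full: push the batch out first */
		flush(b);
	b->q[b->cur++] = v;
}

Flushing before appending keeps the staging array bounded while preserving packet order.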
Example No. 7
/*
 * dummynet hook for packets.
 * We use the argument to locate the flowset fs and the sched_set sch
 * associated to it. Then we apply flow_mask and sched_mask to
 * determine the queue and scheduler instances.
 *
 * dir		where to send the packet after dummynet.
 * *m0		the mbuf with the packet
 * ifp		the 'ifp' parameter from the caller.
 *		NULL in ip_input, destination interface in ip_output.
 */
int
dummynet_io(struct mbuf **m0, int dir, struct ip_fw_args *fwa)
{
	struct mbuf *m = *m0;
	struct dn_fsk *fs = NULL;
	struct dn_sch_inst *si;
	struct dn_queue *q = NULL;	/* default */

	int fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
		((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0);
	DN_BH_WLOCK();
	io_pkt++;
	/* we could actually tag outside the lock, but who cares... */
	if (tag_mbuf(m, dir, fwa))
		goto dropit;
	if (dn_cfg.busy) {
		/* If the upper half is busy doing something expensive,
		 * let's queue the packet and move forward.
		 */
		mq_append(&dn_cfg.pending, m);
		m = *m0 = NULL; /* consumed */
		goto done; /* already active, nothing to do */
	}
	/* XXX locate_flowset could be optimised with a direct ref. */
	fs = dn_ht_find(dn_cfg.fshash, fs_id, 0, NULL);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */
	if (fs->sched == NULL)	/* should not happen */
		goto dropit;
	/* find scheduler instance, possibly applying sched_mask */
	si = ipdn_si_find(fs->sched, &(fwa->f_id));
	if (si == NULL)
		goto dropit;
	/*
	 * If the scheduler supports multiple queues, find the right one
	 * (otherwise it will be ignored by enqueue).
	 */
	if (fs->sched->fp->flags & DN_MULTIQUEUE) {
		q = ipdn_q_find(fs, si, &(fwa->f_id));
		if (q == NULL)
			goto dropit;
	}
	if (fs->sched->fp->enqueue(si, q, m)) {
		/* packet was dropped by enqueue() */
		m = *m0 = NULL;
		goto dropit;
	}

	if (si->kflags & DN_ACTIVE) {
		m = *m0 = NULL; /* consumed */
		goto done; /* already active, nothing to do */
	}

	/* compute the initial allowance */
	if (si->idle_time < dn_cfg.curr_time) {
	    /* Do this only on the first packet on an idle pipe */
	    struct dn_link *p = &fs->sched->link;

	    si->sched_time = dn_cfg.curr_time;
	    si->credit = dn_cfg.io_fast ? p->bandwidth : 0;
	    if (p->burst) {
		uint64_t burst = (dn_cfg.curr_time - si->idle_time) * p->bandwidth;
		if (burst > p->burst)
			burst = p->burst;
		si->credit += burst;
	    }
	}
	/* pass through scheduler and delay line */
	m = serve_sched(NULL, si, dn_cfg.curr_time);

	/* optimization -- pass it back to ipfw for immediate send */
	/* XXX Don't call dummynet_send() if the scheduler returned the
	 *     packet just enqueued. This avoids a lock order reversal.
	 */
	if (/*dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0) {
		/* fast io, rename the tag to carry reinject info. */
		struct m_tag *tag = m_tag_first(m);

		tag->m_tag_cookie = MTAG_IPFW_RULE;
		tag->m_tag_id = 0;
		io_pkt_fast++;
		if (m->m_nextpkt != NULL) {
			printf("dummynet: fast io: pkt chain detected!\n");
			m->m_nextpkt = NULL;
		}
		m = NULL;
	} else {
		*m0 = NULL;
	}
done:
	DN_BH_WUNLOCK();
	if (m)
		dummynet_send(m);
	return 0;

dropit:
	io_pkt_drop++;
	DN_BH_WUNLOCK();
	if (m)
		FREE_PKT(m);
	*m0 = NULL;
	return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS;
}
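The "initial allowance" block grants a link that has just gone idle a credit proportional to its idle time, capped by the configured burst, plus one tick's worth of bandwidth when io_fast is set. The arithmetic, pulled out as a hedged sketch (parameter names are illustrative; the units are whatever the link's bandwidth and clock use, as in the code above):

#include <stdint.h>

/* Credit granted to a link that has been idle, per the logic in
 * dummynet_io(): accumulate idle_ticks * bandwidth, clamped to the
 * configured burst, optionally seeding one tick of bandwidth. */
static uint64_t
initial_credit(uint64_t curr_time, uint64_t idle_time,
    uint64_t bandwidth, uint64_t max_burst, int io_fast)
{
	uint64_t credit = io_fast ? bandwidth : 0;
	uint64_t burst = (curr_time - idle_time) * bandwidth;

	if (burst > max_burst)
		burst = max_burst;
	return credit + burst;
}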
Example No. 8
/*
 * forward a chain of packets to the proper destination.
 * This runs outside the dummynet lock.
 */
static void
dummynet_send(struct mbuf *m)
{
	struct mbuf *n;

	for (; m != NULL; m = n) {
		struct ifnet *ifp = NULL;	/* gcc 3.4.6 complains */
		struct m_tag *tag;
		int dst;

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		tag = m_tag_first(m);
		if (tag == NULL) { /* should not happen */
			dst = DIR_DROP;
		} else {
			struct dn_pkt_tag *pkt = dn_tag_get(m);
			/* extract the dummynet info, rename the tag
			 * to carry reinject info.
			 */
			dst = pkt->dn_dir;
			ifp = pkt->ifp;
			tag->m_tag_cookie = MTAG_IPFW_RULE;
			tag->m_tag_id = 0;
		}

		switch (dst) {
		case DIR_OUT:
			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
			break;

		case DIR_IN:
			netisr_dispatch(NETISR_IP, m);
			break;

#ifdef INET6
		case DIR_IN | PROTO_IPV6:
			netisr_dispatch(NETISR_IPV6, m);
			break;

		case DIR_OUT | PROTO_IPV6:
			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
			break;
#endif

		case DIR_FWD | PROTO_IFB: /* DN_TO_IFB_FWD: */
			if (bridge_dn_p != NULL)
				(*bridge_dn_p)(m, ifp);
			else
				printf("dummynet: if_bridge not loaded\n");
			break;

		case DIR_IN | PROTO_LAYER2: /* DN_TO_ETH_DEMUX: */
			/*
			 * The Ethernet code assumes the Ethernet header is
			 * contiguous in the first mbuf header.
			 * Ensure this is true.
			 */
			if (m->m_len < ETHER_HDR_LEN &&
			    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
				printf("dummynet/ether: pullup failed, "
				    "dropping packet\n");
				break;
			}
			ether_demux(m->m_pkthdr.rcvif, m);
			break;

		case DIR_OUT | PROTO_LAYER2: /* DN_TO_ETH_OUT: */
			ether_output_frame(ifp, m);
			break;

		case DIR_DROP:
			/* drop the packet after some time */
			FREE_PKT(m);
			break;

		default:
			printf("dummynet: bad switch %d!\n", dst);
			FREE_PKT(m);
			break;
		}
	}
}
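The switch in dummynet_send works because the tag's dn_dir encodes a direction OR'd with a protocol flag, so each case label matches one (direction, protocol) pair. A sketch of that encoding with made-up constant values (the real constants live in the ipfw private headers, and the real dispatch calls into the stack as above):

#include <stdio.h>

/* Illustrative stand-ins for the direction/protocol encoding:
 * a direction in the low bits, protocol flags OR'd in above it. */
enum {
	DIR_OUT = 0, DIR_IN = 1, DIR_FWD = 2, DIR_DROP = 3,
	PROTO_IPV6 = 0x10, PROTO_LAYER2 = 0x20,
};

static void
dispatch(int dst)
{
	switch (dst) {
	case DIR_IN:			/* reinject into IPv4 input */
		printf("netisr_dispatch(NETISR_IP, m)\n");
		break;
	case DIR_OUT | PROTO_IPV6:	/* continue IPv6 output */
		printf("ip6_output(m, ...)\n");
		break;
	case DIR_IN | PROTO_LAYER2:	/* back to Ethernet demux */
		printf("ether_demux(rcvif, m)\n");
		break;
	default:
		printf("bad switch %#x, drop\n", dst);
		break;
	}
}

int
main(void)
{
	dispatch(DIR_IN);
	dispatch(DIR_OUT | PROTO_IPV6);
	dispatch(DIR_DROP);
	return 0;
}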
Example No. 9
/*
 * txq[] has a batch of n packets that possibly need to be forwarded.
 */
int
netmap_fwd(struct my_netmap_port *port)
{
	u_int dr; /* destination ring */
	u_int i = 0;
	const u_int n = port->cur_txq;	/* how many queued packets */
	struct txq_entry *x = port->q;
	int retry = 5;	/* max retries */
	struct nm_desc *dst = port->d;

	if (n == 0) {
		D("nothing to forward to %s", port->ifp.if_xname);
		return 0;
	}

again:
	/* scan all output rings; dr is the destination ring index */
	for (dr = dst->first_tx_ring; i < n && dr <= dst->last_tx_ring; dr++) {
		struct netmap_ring *ring = NETMAP_TXRING(dst->nifp, dr);

		__builtin_prefetch(ring);
		if (nm_ring_empty(ring))
			continue;
		/*
		 * We have different ways to transfer from src->dst
		 *
		 * src	dst	Now		Eventually (not done)
		 *
		 * PHYS	PHYS	buf swap
		 * PHYS VIRT	NS_INDIRECT
		 * VIRT	PHYS	copy		NS_INDIRECT
		 * VIRT	VIRT	NS_INDIRECT
		 * MBUF	PHYS	copy		NS_INDIRECT
		 * MBUF	VIRT	NS_INDIRECT
		 *
		 * The "eventually" depends on implementing NS_INDIRECT
		 * on physical device drivers.
		 * Note we do not yet differentiate PHYS/VIRT.
		 */
		for (; i < n && !nm_ring_empty(ring); i++) {
			struct netmap_slot *dst, *src;

			dst = &ring->slot[ring->cur];
			if (x[i].flags == TXQ_IS_SLOT) {
				struct netmap_ring *sr = x[i].ring_or_mbuf;

				src = &sr->slot[x[i].slot_idx];
				dst->len = src->len;

				if (port->can_swap_bufs) {
					u_int tmp = dst->buf_idx;

					ND("pkt %d len %d", i, src->len);
					dst->flags = src->flags = NS_BUF_CHANGED;
					dst->buf_idx = src->buf_idx;
					src->buf_idx = tmp;
				} else if (port->peer->allocator_id == 1) { /* no indirect */
					nm_pkt_copy(NETMAP_BUF(sr, src->buf_idx),
						NETMAP_BUF(ring, dst->buf_idx),
						dst->len);
				} else {
					dst->ptr = (uintptr_t)NETMAP_BUF(sr, src->buf_idx);
					dst->flags = NS_INDIRECT;
				}
			} else if (x[i].flags == TXQ_IS_MBUF) {
				struct mbuf *m = (void *)x[i].ring_or_mbuf;

				ND("copy from mbuf");
				dst->len = m->__m_extlen;
				nm_pkt_copy(m->__m_extbuf,
					NETMAP_BUF(ring, dst->buf_idx),
					dst->len);
				FREE_PKT(m);
			} else {
				panic("bad slot");
			}
			x[i].flags = 0;
			ring->head = ring->cur = nm_ring_next(ring, ring->cur);
		}
	}
	if (i < n) {
		if (retry-- > 0) {
			ioctl(port->d->fd, NIOCTXSYNC);
			goto again;
		}
		RD(1, "%d buffers leftover", n - i);
		for (; i < n; i++) {
			if (x[i].flags == TXQ_IS_MBUF) {
				FREE_PKT(x[i].ring_or_mbuf);
			}
		}
	}
	port->cur_txq = 0;
	return 0;
}
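When source and destination rings share a buffer pool (can_swap_bufs), the inner loop above transfers a packet by swapping slot buffer indices instead of copying bytes; NS_BUF_CHANGED tells the kernel to re-read the buffer address on the next sync. A sketch of just the swap, on a simplified slot layout (the real struct netmap_slot is defined in net/netmap.h):

#include <stdint.h>

#define NS_BUF_CHANGED 0x0001	/* as in netmap: buf_idx changed */

struct slot {			/* simplified stand-in for netmap_slot */
	uint32_t buf_idx;
	uint16_t len;
	uint16_t flags;
};

/* Zero-copy transfer: exchange buffer indices instead of copying
 * payload. Both slots must flag NS_BUF_CHANGED so that the kernel
 * reloads the buffer addresses on the next ring sync. */
static void
swap_bufs(struct slot *dst, struct slot *src)
{
	uint32_t tmp = dst->buf_idx;

	dst->buf_idx = src->buf_idx;
	src->buf_idx = tmp;
	dst->len = src->len;
	dst->flags = src->flags = NS_BUF_CHANGED;
}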