Example #1
static int
rte_port_ring_writer_tx_bulk_internal(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask,
		uint32_t is_multi)
{
	struct rte_port_ring_writer *p =
		(struct rte_port_ring_writer *) port;

	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	/* expr == 0 iff pkts_mask is a contiguous run of ones starting at bit 0
	 * that reaches the burst-size bit, i.e. a full burst is ready */
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		if (tx_buf_count) {
			if (is_multi)
				send_burst_mp(p);
			else
				send_burst(p);
		}

		RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
		if (is_multi)
			n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring, (void **)pkts,
				n_pkts);
		else
			n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts,
				n_pkts);

		RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
		for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			rte_pktmbuf_free(pkt);
		}
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz) {
			if (is_multi)
				send_burst_mp(p);
			else
				send_burst(p);
		}
	}

	return 0;
}
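The expr test above packs two checks into a single branch: (pkts_mask & (pkts_mask + 1)) is zero only when the mask is a contiguous run of ones starting at bit 0, and ((pkts_mask & bsz_mask) ^ bsz_mask) is zero only when the bit at position tx_burst_sz - 1 is set, so the fast path fires only for a contiguous run of at least tx_burst_sz packets. A minimal standalone sketch of the same test, assuming bsz_mask is initialized to 1LLU << (tx_burst_sz - 1) as the port-creation code does:

#include <stdint.h>
#include <stdio.h>

static int
is_full_burst(uint64_t pkts_mask, uint32_t tx_burst_sz)
{
	uint64_t bsz_mask = 1LLU << (tx_burst_sz - 1);
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |	/* contiguous from bit 0? */
			((pkts_mask & bsz_mask) ^ bsz_mask);	/* burst-size bit set? */

	return expr == 0;
}

int
main(void)
{
	printf("%d\n", is_full_burst(0xFF, 8));	/* 1: full 8-packet burst */
	printf("%d\n", is_full_burst(0x7F, 8));	/* 0: only 7 packets, take slow path */
	printf("%d\n", is_full_burst(0xF5, 8));	/* 0: holes in the mask */
	return 0;
}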
Example #2
static int
rte_port_ethdev_writer_tx_bulk(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask)
{
	struct rte_port_ethdev_writer *p =
		(struct rte_port_ethdev_writer *) port;

	if ((pkts_mask & (pkts_mask + 1)) == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t i;

		for (i = 0; i < n_pkts; i++) {
			struct rte_mbuf *pkt = pkts[i];

			p->tx_buf[p->tx_buf_count++] = pkt;
			if (p->tx_buf_count >= p->tx_burst_sz)
				send_burst(p);
		}
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[p->tx_buf_count++] = pkt;
			if (p->tx_buf_count >= p->tx_burst_sz)
				send_burst(p);
			pkts_mask &= ~pkt_mask;
		}
	}

	return 0;
}
Example #3
static int
rte_port_fd_writer_tx_bulk(void *port,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask)
{
	struct rte_port_fd_writer *p = port;
	uint32_t tx_buf_count = p->tx_buf_count;

	if ((pkts_mask & (pkts_mask + 1)) == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t i;

		for (i = 0; i < n_pkts; i++)
			p->tx_buf[tx_buf_count++] = pkts[i];
		RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
	} else
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

	p->tx_buf_count = tx_buf_count;
	if (tx_buf_count >= p->tx_burst_sz)
		send_burst(p);

	return 0;
}
Example #4
static int
rte_port_ethdev_writer_tx_bulk(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask)
{
	struct rte_port_ethdev_writer *p =
		(struct rte_port_ethdev_writer *) port;
	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
	/* Same fast-path test as in Example #1: is a full contiguous burst ready? */
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		if (tx_buf_count)
			send_burst(p);

		RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
		n_pkts_ok = rte_eth_tx_burst(p->port_id, p->queue_id, pkts,
			n_pkts);

		RTE_PORT_ETHDEV_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
		for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			rte_pktmbuf_free(pkt);
		}
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz)
			send_burst(p);
	}

	return 0;
}
Example #5
static int
rte_port_ring_writer_flush(void *port)
{
	struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;

	if (p->tx_buf_count > 0)
		send_burst(p);

	return 0;
}
Example #6
static int
dpdk_knidev_writer_flush(void *port)
{
    struct dpdk_knidev_writer *p = (struct dpdk_knidev_writer *) port;

    if (p->tx_buf_count > 0)
        send_burst(p);

    return 0;
}
Example #7
static int
rte_port_ring_writer_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;

	p->tx_buf[p->tx_buf_count++] = pkt;
	RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, 1);
	if (p->tx_buf_count >= p->tx_burst_sz)
		send_burst(p);

	return 0;
}
Example #8
static int
dpdk_knidev_writer_tx(void *port, struct rte_mbuf *pkt)
{
    struct dpdk_knidev_writer *p = (struct dpdk_knidev_writer *) port;

    p->tx_buf[p->tx_buf_count++] = pkt;
    DPDK_KNIDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
    if (p->tx_buf_count >= p->tx_burst_sz)
        send_burst(p);

    return 0;
}
Example #9
static int
rte_port_ethdev_writer_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_ethdev_writer *p =
		(struct rte_port_ethdev_writer *) port;

	p->tx_buf[p->tx_buf_count++] = pkt;
	if (p->tx_buf_count >= p->tx_burst_sz)
		send_burst(p);

	return 0;
}
Example #10
static int
rte_port_ring_writer_ras_tx(void *port, struct rte_mbuf *pkt)
{
	struct rte_port_ring_writer_ras *p = port;

	RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(p, 1);
	p->f_ras(p, pkt);
	if (p->tx_buf_count >= p->tx_burst_sz)
		send_burst(p);

	return 0;
}
Example #11
static int
dpdk_knidev_writer_tx(void *port, struct rte_mbuf *pkt)
{
    struct dpdk_knidev_writer *p = (struct dpdk_knidev_writer *) port;
    struct rte_mbuf *pkt_copy;

    /*
     * KNI kernel module uses a trick to speed up packet processing. It takes
     * a physical address of a memory pool, converts it to the kernel virtual
     * address with phys_to_virt() and saves the address.
     *
     * Then in kni_net_rx_normal() instead of using phys_to_virt() per each
     * packet, KNI just calculates the difference between the previously
     * converted physical address of the given mempool and the packets
     * physical address.
     *
     * It works well for the mbufs from the same mempool. It also works fine
     * with any mempool allocated from the same physically contiguous memory
     * segment.
     *
     * As soon as we get a mempool allocated from another memory segment, the
     * difference calculations fail and thus we might have a crash.
     *
     * So we make sure the packet is from the RSS mempool. If not, we make
     * a copy to the RSS mempool.
     */
    if (unlikely(pkt->pool != vr_dpdk.rss_mempool ||
            /* Check indirect mbuf's data is within the RSS mempool. */
            rte_pktmbuf_mtod(pkt, uintptr_t) < vr_dpdk.rss_mempool->elt_va_start ||
            rte_pktmbuf_mtod(pkt, uintptr_t) > vr_dpdk.rss_mempool->elt_va_end
            )) {
        pkt_copy = vr_dpdk_pktmbuf_copy(pkt, vr_dpdk.rss_mempool);
        /* The original mbuf is no longer needed. */
        vr_dpdk_pfree(pkt, VP_DROP_CLONED_ORIGINAL);

        if (unlikely(pkt_copy == NULL)) {
            DPDK_KNIDEV_WRITER_STATS_PKTS_DROP_ADD(p, 1);
            return -1;
        }

        pkt = pkt_copy;
    }

    p->tx_buf[p->tx_buf_count++] = pkt;
    DPDK_KNIDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
    if (p->tx_buf_count >= p->tx_burst_sz)
        send_burst(p);

    return 0;
}
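The containment test can be factored into a helper. A minimal sketch, assuming the older DPDK mempool layout used above, where the element virtual-address range is exposed as elt_va_start/elt_va_end:

/* True iff the mbuf belongs to mp and its data pointer falls inside
 * mp's element VA range, so KNI's offset trick stays valid. */
static inline int
mbuf_in_mempool_va_range(struct rte_mbuf *m, struct rte_mempool *mp)
{
    uintptr_t data = rte_pktmbuf_mtod(m, uintptr_t);

    return m->pool == mp &&
           data >= mp->elt_va_start &&
           data <= mp->elt_va_end;
}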
Example #12
/* Queue the packets for each port */
static inline void
flood_send_pkt(struct rte_mbuf *pkt, uint8_t port)
{
	uint16_t len;

	/* Put new packet into the output queue */
	len = port_tx_conf.tx_mbufs[port].len;
	port_tx_conf.tx_mbufs[port].m_table[len] = pkt;
	port_tx_conf.tx_mbufs[port].len = ++len;

	/* Transmit packets */
	if (unlikely(BURST_SIZE == len))
		send_burst(port);
}
Example #13
static int
rte_port_ring_writer_ras_tx_bulk(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask)
{
	struct rte_port_ring_writer_ras *p = port;

	if ((pkts_mask & (pkts_mask + 1)) == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t i;

		for (i = 0; i < n_pkts; i++) {
			struct rte_mbuf *pkt = pkts[i];

			RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(p, 1);
			p->f_ras(p, pkt);
			if (p->tx_buf_count >= p->tx_burst_sz)
				send_burst(p);
		}
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(p, 1);
			p->f_ras(p, pkt);
			if (p->tx_buf_count >= p->tx_burst_sz)
				send_burst(p);

			pkts_mask &= ~pkt_mask;
		}
	}

	return 0;
}
Example #14
/* Send burst of outgoing packet, if timeout expires. */
static inline void
send_timeout_burst(void)
{
	uint64_t cur_tsc;
	uint8_t port;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	cur_tsc = rte_rdtsc();
	if (likely (cur_tsc < port_tx_conf.tx_tsc + drain_tsc))
		return;

	for (port = 0; port < MAX_PORTS; port++) {
		if (port_tx_conf.tx_mbufs[port].len != 0)
			send_burst(port);
	}
	port_tx_conf.tx_tsc = cur_tsc;
}
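The drain_tsc expression converts a microsecond timeout into TSC cycles: cycles per microsecond (rounded up) times the drain interval. A standalone sketch of the same arithmetic, where the 2 GHz frequency and the 100 us drain value are illustrative assumptions standing in for rte_get_tsc_hz() and BURST_TX_DRAIN_US:

#include <stdint.h>
#include <stdio.h>

#define US_PER_S 1000000ULL

int
main(void)
{
	uint64_t tsc_hz = 2000000000ULL;	/* assumed 2 GHz TSC */
	uint64_t drain_us = 100;		/* assumed drain interval */

	/* Round cycles-per-us up so the timeout is never shorter than requested */
	uint64_t drain_tsc = (tsc_hz + US_PER_S - 1) / US_PER_S * drain_us;

	printf("%llu cycles\n", (unsigned long long)drain_tsc);	/* 200000 */
	return 0;
}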
Example #15
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(lcore_conf_t *qconf,
                   struct rte_mbuf *m, uint8_t port)
{
    uint16_t len;

    len = qconf->tx_mbufs[port].len;
    qconf->tx_mbufs[port].m_table[len] = m;
    len++;

    /* enough pkts to be sent */
    if (unlikely(len == MAX_PKT_BURST)) {
        send_burst(qconf, MAX_PKT_BURST, port);
        len = 0;
    }

    qconf->tx_mbufs[port].len = len;
    return 0;
}
Example #16
struct timeval
single_req_resp_cycle(int sockfd, int req_size, int res_size, struct timespec ts)
{
	struct timeval ts1, ts2, ts_diff;
	char buffer[MAX_TRSF];
	uint32_t tmp;
	int byterx, size;

	memset(buffer, 0, MAX_TRSF);

	/* Build the header: request and response sizes in network byte order */
	tmp = htonl(req_size);
	memcpy(buffer, &tmp, sizeof(uint32_t));
	tmp = htonl(res_size);
	memcpy(buffer + sizeof(uint32_t), &tmp, sizeof(uint32_t));

	size = req_size + 2 * sizeof(uint32_t);

	/* Timestamp before the request */
	gettimeofday(&ts1, NULL);

	/* Client sends the request */
	send_burst(sockfd, buffer, size);

	/* Client reads the response */
	for (byterx = 0; byterx < res_size; ) {
		int n = read(sockfd, buffer, MAX_TRSF);

		if (n <= 0)
			break;	/* EOF or error: stop instead of spinning */
		byterx += n;
	}

	/* Timestamp after the response */
	gettimeofday(&ts2, NULL);
	timersub(&ts2, &ts1, &ts_diff);

	nanosleep(&ts, NULL);

	return ts_diff;
}
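Callers typically reduce the returned struct timeval to a single latency number. A small helper for that, added here for illustration only:

/* Collapse a struct timeval into microseconds for easy comparison */
static long
tv_to_usec(struct timeval tv)
{
	return tv.tv_sec * 1000000L + tv.tv_usec;
}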
Example #17
int cc1100_send(radio_address_t addr, protocol_t protocol, int priority, char *payload, int payload_len)
{
    bool result;
    int return_code;
    uint8_t address;
    uint8_t retries;

    /* Lock mutex, nobody else should send now */
    cc1100_phy_mutex_lock();

    /* TX state machine lock -> no timers (WOR), no packets (only ACKs) */
    rflags.TX = true;

    /* Set chip to idle state */
    cc1100_set_idle();

    /* CC1100 radio layer only supports 8-bit addresses */
    address = addr;

    /* Loopback not supported */
    if (address == cc1100_get_address()) {
        return_code = RADIO_ADDR_OUT_OF_RANGE;
        goto mode_before_final;
    }

    /* Check address */
    if (address > MAX_UID) {
        return_code = RADIO_ADDR_OUT_OF_RANGE;
        goto mode_before_final;
    }

    /* Packet too long */
    if (payload_len > MAX_DATA_LENGTH) {
        return_code = RADIO_PAYLOAD_TOO_LONG;
        goto mode_before_final;
    }

    if (radio_state == RADIO_PWD) {
        return_code = RADIO_WRONG_MODE;
        goto mode_before_final;
    }

    /* Set number of transmission retries */
    retries = (address == CC1100_BROADCAST_ADDRESS) ?
              cc1100_retransmission_count_bc : cc1100_retransmission_count_uc;

    memset(tx_buffer.data, 0, MAX_DATA_LENGTH);			/* Clean data */

    /* TODO: If packets are shorter than max packet size, WOR interval is too long.
     *       This must be solved in some way. */
    tx_buffer.length = 3 + payload_len;				/* 3 bytes (A&PS&F) + data length */
    tx_buffer.address = address;						/* Copy destination address */
    tx_buffer.flags = 0x00;								/* Set clean state */
    tx_buffer.flags = W_FLAGS_PROTOCOL(protocol);		/* Copy protocol identifier */
    tx_buffer.phy_src = (uint8_t) cc1100_get_address();	/* Copy sender address */

    /* Set identification number of packet */
    tx_buffer.flags |= rflags.SEQ;						/* Set flags.identification (bit 0) */
    rflags.SEQ = !rflags.SEQ;							/* Toggle value of layer 0 sequence number bit */

    memcpy(tx_buffer.data, payload, payload_len);		/* Copy data */

    /* Send the packet */
    cc1100_spi_write_reg(CC1100_MCSM0, 0x08);			/* Turn off FS-Autocal */
    result = send_burst(&tx_buffer, retries, 0);		/* Send raw burst */
    return_code = result ? payload_len : RADIO_OP_FAILED;

    /* Collect statistics */
    if (address != CC1100_BROADCAST_ADDRESS) {
        cc1100_statistic.packets_out++;

        if (result) {
            cc1100_statistic.packets_out_acked++;
        }
    }
    else {
        cc1100_statistic.packets_out_broadcast++;
    }

    goto final;

mode_before_final:
    rflags.TX = false;
    /* Definitely set secure mode (CONST_RX -> RX, WOR -> WOR) */
    cc1100_go_after_tx();

final:
    /* Release the mutex acquired at the top and hand back the result */
    cc1100_phy_mutex_unlock();

    return return_code;
}
Example #18
static bool send_burst(cc1100_packet_layer0_t *packet, uint8_t retries, uint8_t rtc)
{
    int i;
    radio_state = RADIO_SEND_BURST;
    rflags.LL_ACK = false;

    extern unsigned long hwtimer_now(void);

    for (i = 1; i <= cc1100_burst_count; i++) {
        /*
         * Number of bytes to send is:
         * length of phy payload (packet->length)
         * + size of length field (1 byte)
         */
        timer_tick_t t = hwtimer_now() + RTIMER_TICKS(T_PACKET_INTERVAL);
        cc1100_send_raw((uint8_t *)packet, packet->length + 1);	/* RX -> TX (9.6 us) */

        cc1100_statistic.raw_packets_out++;

        /* Delay until predefined "send" interval has passed */
        timer_tick_t now = hwtimer_now();

        if (t > now) {
            hwtimer_wait(t - now);
        }

        /**
         * After sending the packet the CC1100 goes automatically
         * into RX mode (21.5 us) (listening for an ACK).
         * Do not interrupt burst if send to broadcast address (a node may
         * have the broadcast address at startup and would stop the burst
         * by sending an ACK).
         */
        if (rflags.LL_ACK && packet->address != CC1100_BROADCAST_ADDRESS) {
            cc1100_statistic.raw_packets_out_acked += i;
            break;
        }
    }

    /* No link level ACK -> do retry if retry counter greater zero
     * Note: Event broadcast packets can be sent repeatedly if in
     *       constant RX mode. In WOR mode it is not necessary, so
     *       set retry count to zero.*/
    if (!rflags.LL_ACK && retries > 0) {
        return send_burst(packet, retries - 1, rtc + 1);
    }

    /* Store number of transmission retries */
    rflags.RETC = rtc;
    rflags.RPS = rtc * cc1100_burst_count + i;

    if (i > cc1100_burst_count) {
        rflags.RPS--;
    }

    rflags.TX = false;

    /* Go to mode after TX (CONST_RX -> RX, WOR -> WOR) */
    cc1100_go_after_tx();

    /* Burst from any other node is definitely over */
    last_seq_num = 0;

    if (packet->address != CC1100_BROADCAST_ADDRESS && !rflags.LL_ACK) {
        return false;
    }

    return true;
}
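A hypothetical call site for cc1100_send(); the address 5, protocol id 1, and priority 0 are stand-in values for illustration. Per the code above, the function returns the payload length on success and a RADIO_* error code otherwise:

static void
send_ping_example(void)
{
    char payload[] = "ping";
    int ret = cc1100_send(5, 1, 0, payload, sizeof(payload));

    if (ret != sizeof(payload)) {
        /* RADIO_OP_FAILED, RADIO_ADDR_OUT_OF_RANGE, ... */
    }
}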