Example #1
void
init_hdr_templates(void)
{
	memset(ip_hdr_template, 0, sizeof(ip_hdr_template));
	memset(l2_hdr_template, 0, sizeof(l2_hdr_template));

	ip_hdr_template[0].version_ihl = IP_VHL_DEF;
	ip_hdr_template[0].type_of_service = (2 << 2); /* default DSCP 2 */
	ip_hdr_template[0].total_length = 0;
	ip_hdr_template[0].packet_id = 0;
	ip_hdr_template[0].fragment_offset = IP_DN_FRAGMENT_FLAG;
	ip_hdr_template[0].time_to_live = IP_DEFTTL;
	ip_hdr_template[0].next_proto_id = IPPROTO_IP;
	ip_hdr_template[0].hdr_checksum = 0;
	ip_hdr_template[0].src_addr = rte_cpu_to_be_32(0x00000000);
	ip_hdr_template[0].dst_addr = rte_cpu_to_be_32(0x07010101);

	l2_hdr_template[0].d_addr.addr_bytes[0] = 0x0a;
	l2_hdr_template[0].d_addr.addr_bytes[1] = 0x00;
	l2_hdr_template[0].d_addr.addr_bytes[2] = 0x27;
	l2_hdr_template[0].d_addr.addr_bytes[3] = 0x00;
	l2_hdr_template[0].d_addr.addr_bytes[4] = 0x00;
	l2_hdr_template[0].d_addr.addr_bytes[5] = 0x01;

	l2_hdr_template[0].s_addr.addr_bytes[0] = 0x08;
	l2_hdr_template[0].s_addr.addr_bytes[1] = 0x00;
	l2_hdr_template[0].s_addr.addr_bytes[2] = 0x27;
	l2_hdr_template[0].s_addr.addr_bytes[3] = 0x7d;
	l2_hdr_template[0].s_addr.addr_bytes[4] = 0xc7;
	l2_hdr_template[0].s_addr.addr_bytes[5] = 0x68;

	l2_hdr_template[0].ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);

}
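For context, a minimal sketch of the declarations this initializer assumes. The identifiers match the example; the values and array sizes here are illustrative guesses, not taken from the original project:

#include <string.h>
#include <rte_ip.h>
#include <rte_ether.h>
#include <rte_byteorder.h>
#include <netinet/in.h>		/* IPPROTO_IP */

/* Hypothetical values; only the names come from the example. */
#define IP_VHL_DEF		(0x40 | 0x05)	/* IPv4, 5 x 32-bit words */
#define IP_DEFTTL		64
#define IP_DN_FRAGMENT_FLAG	0x0040		/* DF bit, network order */

static struct ipv4_hdr ip_hdr_template[1];
static struct ether_hdr l2_hdr_template[1];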
Example #2
/* byteswap to cpu or network order */
static void
bswap_test_data(struct ipv4_7tuple *data, int len, int to_be)
{
	int i;

	for (i = 0; i < len; i++) {

		if (to_be) {
			/* swap all bytes so that they are in network order */
			data[i].ip_dst = rte_cpu_to_be_32(data[i].ip_dst);
			data[i].ip_src = rte_cpu_to_be_32(data[i].ip_src);
			data[i].port_dst = rte_cpu_to_be_16(data[i].port_dst);
			data[i].port_src = rte_cpu_to_be_16(data[i].port_src);
			data[i].vlan = rte_cpu_to_be_16(data[i].vlan);
			data[i].domain = rte_cpu_to_be_16(data[i].domain);
		} else {
			data[i].ip_dst = rte_be_to_cpu_32(data[i].ip_dst);
			data[i].ip_src = rte_be_to_cpu_32(data[i].ip_src);
			data[i].port_dst = rte_be_to_cpu_16(data[i].port_dst);
			data[i].port_src = rte_be_to_cpu_16(data[i].port_src);
			data[i].vlan = rte_be_to_cpu_16(data[i].vlan);
			data[i].domain = rte_be_to_cpu_16(data[i].domain);
		}
	}
}
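A small usage sketch, assuming an ipv4_7tuple layout inferred from the fields touched above (the real struct presumably carries a protocol member as the seventh tuple element):

struct ipv4_7tuple {
	uint32_t ip_dst;
	uint32_t ip_src;
	uint16_t port_dst;
	uint16_t port_src;
	uint16_t vlan;
	uint16_t domain;
	uint8_t  proto;		/* hypothetical seventh member */
};

static void
bswap_round_trip(struct ipv4_7tuple *keys, int n)
{
	bswap_test_data(keys, n, 1);	/* host -> network order */
	/* ... hand keys to a classifier expecting big-endian ... */
	bswap_test_data(keys, n, 0);	/* back to host order */
}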
Example #3
static void
send_paxos_message(paxos_message *pm) {
    uint8_t port_id = 0;
    struct rte_mbuf *created_pkt = rte_pktmbuf_alloc(mbuf_pool);
    if (created_pkt == NULL)
        return;
    created_pkt->l2_len = sizeof(struct ether_hdr);
    created_pkt->l3_len = sizeof(struct ipv4_hdr);
    created_pkt->l4_len = sizeof(struct udp_hdr); /* L4 header only, not payload */
    craft_new_packet(&created_pkt, IPv4(192,168,4,99), ACCEPTOR_ADDR,
                     PROPOSER_PORT, ACCEPTOR_PORT, sizeof(paxos_message), port_id);
    size_t udp_offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
    size_t paxos_offset = udp_offset + sizeof(struct udp_hdr);
    struct paxos_hdr *px = rte_pktmbuf_mtod_offset(created_pkt,
            struct paxos_hdr *, paxos_offset);
    px->msgtype = rte_cpu_to_be_16(pm->type);
    px->inst = rte_cpu_to_be_32(pm->u.accept.iid);
    px->rnd = rte_cpu_to_be_16(pm->u.accept.ballot);
    px->vrnd = rte_cpu_to_be_16(pm->u.accept.value_ballot);
    px->acptid = 0;
    rte_memcpy(px->paxosval, pm->u.accept.value.paxos_value_val, pm->u.accept.value.paxos_value_len);
    created_pkt->ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;
    const uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, &created_pkt, 1);
    /* rte_eth_tx_burst takes ownership of the mbufs it sends; only free
     * the packet if it was not transmitted */
    if (nb_tx < 1)
        rte_pktmbuf_free(created_pkt);
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Sent %d messages\n", nb_tx);
}
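One caveat on the offload flags above: when PKT_TX_UDP_CKSUM is requested, DPDK expects the UDP checksum field to be seeded with the IPv4 pseudo-header checksum before transmit. A sketch, assuming craft_new_packet does not already do this:

    struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(created_pkt,
            struct ipv4_hdr *, sizeof(struct ether_hdr));
    struct udp_hdr *udp = rte_pktmbuf_mtod_offset(created_pkt,
            struct udp_hdr *, udp_offset);
    ip->hdr_checksum = 0;   /* filled by hardware (PKT_TX_IP_CKSUM) */
    udp->dgram_cksum = rte_ipv4_phdr_cksum(ip, created_pkt->ol_flags);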
Example #4
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		unsigned nb_entries, unsigned inbound)
{
	struct ipsec_sa *sa;
	unsigned i, idx;

	for (i = 0; i < nb_entries; i++) {
		idx = SPI2IDX(entries[i].spi);
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];
		sa->src = rte_cpu_to_be_32(sa->src);
		sa->dst = rte_cpu_to_be_32(sa->dst);
		if (inbound) {
			if (sa->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				sa_ctx->xf[idx].a = null_auth_xf;
				sa_ctx->xf[idx].b = null_cipher_xf;
			} else {
				sa_ctx->xf[idx].a = sha1hmac_verify_xf;
				sa_ctx->xf[idx].b = aescbc_dec_xf;
			}
		} else { /* outbound */
			if (sa->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				sa_ctx->xf[idx].a = null_cipher_xf;
				sa_ctx->xf[idx].b = null_auth_xf;
			} else {
				sa_ctx->xf[idx].a = aescbc_enc_xf;
				sa_ctx->xf[idx].b = sha1hmac_gen_xf;
			}
		}
		sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
		sa_ctx->xf[idx].b.next = NULL;
		sa->xforms = &sa_ctx->xf[idx].a;
	}

	return 0;
}
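For reference, a plausible shape for the SPI2IDX mapping this loop relies on; the DPDK ipsec-secgw example masks the SPI into the SA table, though the table size below is illustrative:

/* Hypothetical: map an SPI onto a slot in sa_ctx->sa[]. Collisions are
 * exactly what the (sa->spi != 0) check above guards against. */
#define IPSEC_SA_MAX_ENTRIES	128
#define SPI2IDX(spi)		((spi) & (IPSEC_SA_MAX_ENTRIES - 1))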
Example #5
File: scsi.c  Project: emmericp/dpdk
int
vhost_bdev_process_scsi_commands(struct vhost_block_dev *bdev,
				 struct vhost_scsi_task *task)
{
	int len;
	uint8_t *data;
	uint64_t *temp64, fmt_lun = 0;
	uint32_t *temp32;
	const uint8_t *lun;
	uint8_t *cdb = (uint8_t *)task->req->cdb;

	lun = (const uint8_t *)task->req->lun;
	/* only 1 LUN supported */
	if (lun[0] != 1 || lun[1] >= 1)
		return -1;

	switch (cdb[0]) {
	case SPC_INQUIRY:
		len = vhost_bdev_scsi_inquiry_command(bdev, task);
		task->data_len = len;
		break;
	case SPC_REPORT_LUNS:
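		/* REPORT LUNS parameter data (SPC-3): bytes 0-3 hold the LUN
		 * list length in big-endian (8 = one single-level LUN entry),
		 * bytes 4-7 are reserved, and the LUN list starts at byte 8. */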
		data = (uint8_t *)task->iovs[0].iov_base;
		fmt_lun |= (0x0ULL & 0x00ffULL) << 48;
		temp64 = (uint64_t *)&data[8];
		*temp64 = rte_cpu_to_be_64(fmt_lun);
		temp32 = (uint32_t *)data;
		*temp32 = rte_cpu_to_be_32(8);
		task->data_len = 16;
		scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
		break;
	case SPC_MODE_SELECT_6:
	case SPC_MODE_SELECT_10:
		/* don't support it now */
		scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
		break;
	case SPC_MODE_SENSE_6:
	case SPC_MODE_SENSE_10:
		/* don't support it now */
		scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
		break;
	case SPC_TEST_UNIT_READY:
		scsi_task_set_status(task, SCSI_STATUS_GOOD, 0, 0, 0);
		break;
	default:
		len = vhost_bdev_scsi_process_block(bdev, task);
		task->data_len = len;
	}

	return 0;
}
Example #6
static void
populate_flow_distributor_table(void)
{
	unsigned int i;
	int32_t ret;
	uint32_t ip_dst;
	uint8_t socket_id = rte_socket_id();
	uint64_t node_id;

	/* Add flows in table */
	for (i = 0; i < num_flows; i++) {
		node_id = i % num_nodes;

		ip_dst = rte_cpu_to_be_32(i);
		ret = rte_efd_update(efd_table, socket_id,
				(void *)&ip_dst, (efd_value_t)node_id);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Unable to add entry %u in "
					"flow distributor table\n", i);
	}

	printf("EFD table: Adding 0x%x keys\n", num_flows);
}
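The lookup side mirrors this: the key must be presented exactly as stored, i.e. as a big-endian address. A sketch using rte_efd_lookup, where flow_id is a hypothetical host-order flow number:

	uint32_t ip_dst = rte_cpu_to_be_32(flow_id);
	efd_value_t node = rte_efd_lookup(efd_table, rte_socket_id(),
			(void *)&ip_dst);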
Example #7
File: scsi.c  Project: emmericp/dpdk
static int
vhost_bdev_scsi_process_block(struct vhost_block_dev *bdev,
			      struct vhost_scsi_task *task)
{
	uint64_t lba, *temp64;
	uint32_t xfer_len, *temp32;
	uint16_t *temp16;
	uint8_t *cdb = (uint8_t *)task->req->cdb;

	switch (cdb[0]) {
	case SBC_READ_6:
	case SBC_WRITE_6:
		lba = (uint64_t)cdb[1] << 16;
		lba |= (uint64_t)cdb[2] << 8;
		lba |= (uint64_t)cdb[3];
		xfer_len = cdb[4];
		if (xfer_len == 0)
			xfer_len = 256;
		return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);

	case SBC_READ_10:
	case SBC_WRITE_10:
		temp32 = (uint32_t *)&cdb[2];
		lba = rte_be_to_cpu_32(*temp32);
		temp16 = (uint16_t *)&cdb[7];
		xfer_len = rte_be_to_cpu_16(*temp16);
		return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);

	case SBC_READ_12:
	case SBC_WRITE_12:
		temp32 = (uint32_t *)&cdb[2];
		lba = rte_be_to_cpu_32(*temp32);
		temp32 = (uint32_t *)&cdb[6];
		xfer_len = rte_be_to_cpu_32(*temp32);
		return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);

	case SBC_READ_16:
	case SBC_WRITE_16:
		temp64 = (uint64_t *)&cdb[2];
		lba = rte_be_to_cpu_64(*temp64);
		temp32 = (uint32_t *)&cdb[10];
		xfer_len = rte_be_to_cpu_32(*temp32);
		return vhost_bdev_scsi_readwrite(bdev, task, lba, xfer_len);

	case SBC_READ_CAPACITY_10: {
		uint8_t buffer[8];

		if (bdev->blockcnt - 1 > 0xffffffffULL)
			memset(buffer, 0xff, 4);
		else {
			temp32 = (uint32_t *)buffer;
			*temp32 = rte_cpu_to_be_32(bdev->blockcnt - 1);
		}
		temp32 = (uint32_t *)&buffer[4];
		*temp32 = rte_cpu_to_be_32(bdev->blocklen);
		memcpy(task->iovs[0].iov_base, buffer, sizeof(buffer));
		task->resp->status = SCSI_STATUS_GOOD;
		return sizeof(buffer);
	}

	case SBC_SYNCHRONIZE_CACHE_10:
	case SBC_SYNCHRONIZE_CACHE_16:
		task->resp->status = SCSI_STATUS_GOOD;
		return 0;
	}

	scsi_task_set_status(task, SCSI_STATUS_CHECK_CONDITION,
			     SCSI_SENSE_ILLEGAL_REQUEST,
			     SCSI_ASC_INVALID_FIELD_IN_CDB,
			     SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	return 0;
}
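The fixed offsets above follow the SBC command descriptor block layouts; for READ(10)/WRITE(10), for example:

/* READ(10)/WRITE(10) CDB (SBC):
 *   byte 0     opcode
 *   byte 1     flags
 *   bytes 2-5  logical block address, big-endian
 *   byte 6     group number
 *   bytes 7-8  transfer length, big-endian
 *   byte 9     control
 * hence the 32-bit read at &cdb[2] for the LBA and the
 * 16-bit read at &cdb[7] for the transfer length. */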
Example #8
static int neigh_update(struct nlmsghdr *nlh)
{
    int err;
    int len = nlh->nlmsg_len;
    uint32_t index;
    char *pifname;
    char if_name[IF_NAMESIZE];
    char buf[512] = {0};
    struct ndmsg *ndm;
    struct rtattr *rta;
    struct rtattr *tb[NDA_MAX+1];
    struct nda_cacheinfo *ci = NULL;
    struct msg_hdr *hdr;
    struct arp_add *arp_add;
    struct arp_del *arp_del;
    struct route_add *rt_add;
    struct route_del *rt_del;

    len -= NLMSG_LENGTH(sizeof(*ndm));
    if (len < 0)
        return -1;
    
    ndm = NLMSG_DATA(nlh);
    hdr = (struct msg_hdr *)buf;

    if (ndm->ndm_type != RTN_UNICAST)
        return 0;

    if (AF_INET != ndm->ndm_family && AF_INET6 != ndm->ndm_family) {
        fastpath_log_debug("family %d error.\n", ndm->ndm_family);
        return 0;
    }

    index = get_port_map(ndm->ndm_ifindex);
    if (index >= ROUTE_MAX_LINK) {
        fastpath_log_debug("ifidx %d not concerned\n", ndm->ndm_ifindex);
        return 0;
    }

    pifname = if_indextoname(ndm->ndm_ifindex, if_name);
    if (pifname == NULL) {
        fastpath_log_error("%s:get if name by ifindex:%d err\n", 
                  __func__, ndm->ndm_ifindex);
        return -EIO;
    }

    rta = (struct rtattr*)((char*)ndm + NLMSG_ALIGN(sizeof(struct ndmsg)));
    
    rtattr_parse(tb, NDA_MAX, rta, len);
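    /* tb[] now maps attribute types to entries: NDA_DST carries the
     * neighbor's IP address, NDA_LLADDR its link-layer (MAC) address,
     * and NDA_CACHEINFO the kernel's cache bookkeeping, used below to
     * spot entries with no remaining references. */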

    if (NULL == tb[NDA_DST]) {
        fastpath_log_error( "nda dst is null.\n");
        return -EINVAL;
    }
    
    if (NULL != tb[NDA_CACHEINFO]) {
        ci = RTA_DATA(tb[NDA_CACHEINFO]);
    }

    fastpath_log_debug( "%s: neigh update, family %d, ifidx %d, eif%d, state 0x%02x\n",
        __func__, ndm->ndm_family, ndm->ndm_ifindex, index, ndm->ndm_state);

    if (ndm->ndm_state & NUD_FAILED || (ci && (ci->ndm_refcnt == 0))) {
        hdr->cmd = ROUTE_MSG_DEL_NEIGH;
        arp_del = (struct arp_del *)hdr->data;
        arp_del->nh_iface = rte_cpu_to_be_32(index);
        memcpy(&arp_del->nh_ip, RTA_DATA(tb[NDA_DST]), RTA_PAYLOAD(tb[NDA_DST]));
        err = route_send(hdr);
        if (err != 0) {
            fastpath_log_error("%s: send neigh failed\n", __func__);
        }

        hdr->cmd = ROUTE_MSG_DEL_NH;
        rt_del = (struct route_del *)hdr->data;
        memcpy(&rt_del->ip, RTA_DATA(tb[NDA_DST]), RTA_PAYLOAD(tb[NDA_DST]));
        rt_del->depth = 32;
        err = route_send(hdr);
        if (err != 0) {
            fastpath_log_error( "neigh_update: send nh failed\n");
        }
        
    } else /* if (ndm->ndm_state & (NUD_REACHABLE | NUD_PERMANENT)) */ {
        hdr->cmd = ROUTE_MSG_ADD_NEIGH;
        arp_add = (struct arp_add *)hdr->data;
        arp_add->nh_iface = rte_cpu_to_be_32(index);
        memcpy(&arp_add->nh_ip, RTA_DATA(tb[NDA_DST]), RTA_PAYLOAD(tb[NDA_DST]));
        arp_add->type = rte_cpu_to_be_16(NEIGH_TYPE_REACHABLE);
        if (NULL != tb[NDA_LLADDR]) {
            memcpy(&arp_add->nh_arp, (char*)RTA_DATA(tb[NDA_LLADDR]), RTA_PAYLOAD(tb[NDA_LLADDR]));
        }
        err = route_send(hdr);
        if (err != 0) {
            fastpath_log_error("%s: send neigh failed\n", __func__);
        }

        hdr->cmd = ROUTE_MSG_ADD_NH;
        rt_add = (struct route_add *)hdr->data;
        memcpy(&rt_add->ip, RTA_DATA(tb[NDA_DST]), RTA_PAYLOAD(tb[NDA_DST]));
        rt_add->depth = 32;
        memcpy(&rt_add->nh_ip, RTA_DATA(tb[NDA_DST]), RTA_PAYLOAD(tb[NDA_DST]));
        rt_add->nh_iface = rte_cpu_to_be_32(index);
        err = route_send(hdr);
        if (err != 0) {
            fastpath_log_error( "neigh_update: send nh failed\n");
        }
    }
#if 0
    else {
        /* remainder of this branch not present in the captured source */
    }
#endif

    return 0;
}
Example #9
static int
app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
	uint8_t *key_out,
	uint32_t *signature)
{
	uint8_t buffer[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
	void *key_buffer = (key_out) ? key_out : buffer;

	switch (key_in->type) {
	case FLOW_KEY_QINQ:
	{
		struct pkt_key_qinq *qinq = key_buffer;

		qinq->ethertype_svlan = 0;
		qinq->svlan = rte_cpu_to_be_16(key_in->key.qinq.svlan);
		qinq->ethertype_cvlan = 0;
		qinq->cvlan = rte_cpu_to_be_16(key_in->key.qinq.cvlan);

		if (signature)
			*signature = (uint32_t) hash_default_key8(qinq, 8, 0);
		return 0;
	}

	case FLOW_KEY_IPV4_5TUPLE:
	{
		struct pkt_key_ipv4_5tuple *ipv4 = key_buffer;

		ipv4->ttl = 0;
		ipv4->proto = key_in->key.ipv4_5tuple.proto;
		ipv4->checksum = 0;
		ipv4->ip_src = rte_cpu_to_be_32(key_in->key.ipv4_5tuple.ip_src);
		ipv4->ip_dst = rte_cpu_to_be_32(key_in->key.ipv4_5tuple.ip_dst);
		ipv4->port_src = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_src);
		ipv4->port_dst = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_dst);

		if (signature)
			*signature = (uint32_t) hash_default_key16(ipv4, 16, 0);
		return 0;
	}

	case FLOW_KEY_IPV6_5TUPLE:
	{
		struct pkt_key_ipv6_5tuple *ipv6 = key_buffer;

		memset(ipv6, 0, 64);
		ipv6->payload_length = 0;
		ipv6->proto = key_in->key.ipv6_5tuple.proto;
		ipv6->hop_limit = 0;
		memcpy(&ipv6->ip_src, &key_in->key.ipv6_5tuple.ip_src, 16);
		memcpy(&ipv6->ip_dst, &key_in->key.ipv6_5tuple.ip_dst, 16);
		ipv6->port_src = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_src);
		ipv6->port_dst = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_dst);

		if (signature)
			*signature = (uint32_t) hash_default_key64(ipv6, 64, 0);
		return 0;
	}

	default:
		return -1;
	}
}
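A usage sketch for the QinQ case; the VLAN tag values are made up. Note that the converter zeroes every non-key byte so the hash always runs over a canonical buffer:

static int
example_qinq_signature(void)
{
	uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
	uint32_t signature;
	struct pipeline_fc_key in = {
		.type = FLOW_KEY_QINQ,
		.key.qinq = { .svlan = 100, .cvlan = 200 },
	};

	return app_pipeline_fc_key_convert(&in, key, &signature);
}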