Code example #1
File: init.c Project: emmericp/dpdk
static void
app_init_kni(struct app_params *app)
{
	uint32_t i;

	if (app->n_pktq_kni == 0)
		return;

	rte_kni_init(app->n_pktq_kni);

	for (i = 0; i < app->n_pktq_kni; i++) {
		struct app_pktq_kni_params *p_kni = &app->kni_params[i];
		struct app_link_params *p_link;
		struct rte_eth_dev_info dev_info;
		struct app_mempool_params *mempool_params;
		struct rte_mempool *mempool;
		struct rte_kni_conf conf;
		struct rte_kni_ops ops;

		/* LINK */
		p_link = app_get_link_for_kni(app, p_kni);
		memset(&dev_info, 0, sizeof(dev_info));
		rte_eth_dev_info_get(p_link->pmd_id, &dev_info);

		/* MEMPOOL */
		mempool_params = &app->mempool_params[p_kni->mempool_id];
		mempool = app->mempool[p_kni->mempool_id];

		/* KNI */
		memset(&conf, 0, sizeof(conf));
		snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
		conf.force_bind = p_kni->force_bind;
		if (conf.force_bind) {
			int lcore_id;

			lcore_id = cpu_core_map_get_lcore_id(app->core_map,
				p_kni->socket_id,
				p_kni->core_id,
				p_kni->hyper_th_id);

			if (lcore_id < 0)
				rte_panic("%s invalid CPU core\n", p_kni->name);

			conf.core_id = (uint32_t) lcore_id;
		}
		conf.group_id = p_link->pmd_id;
		conf.mbuf_size = mempool_params->buffer_size;
		conf.addr = dev_info.pci_dev->addr;
		conf.id = dev_info.pci_dev->id;

		memset(&ops, 0, sizeof(ops));
		ops.port_id = (uint8_t) p_link->pmd_id;
		ops.change_mtu = kni_change_mtu;
		ops.config_network_if = kni_config_network_interface;

		APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
		app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
		if (!app->kni[i])
			rte_panic("%s init error\n", p_kni->name);
	}
}
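
Example #1 registers two control callbacks, kni_change_mtu and kni_config_network_interface, that are defined elsewhere in the ip_pipeline application. As orientation only, here is a minimal sketch of what such rte_kni_ops callbacks typically look like; the bodies below are assumptions, not the actual ip_pipeline code, and the signatures follow the rte_kni_ops used in this DPDK version.

/* Sketch only. Requires <rte_ethdev.h>, <rte_kni.h>, <errno.h>. */
static int
kni_change_mtu(uint8_t port_id, unsigned new_mtu)
{
	if (port_id >= rte_eth_dev_count())
		return -EINVAL;

	/* Forward the MTU requested on the KNI netdev to the PMD */
	return rte_eth_dev_set_mtu(port_id, new_mtu);
}

static int
kni_config_network_interface(uint8_t port_id, uint8_t if_up)
{
	if (port_id >= rte_eth_dev_count())
		return -EINVAL;

	/* Mirror "ip link set ... up/down" onto the physical port */
	if (if_up)
		return rte_eth_dev_start(port_id);

	rte_eth_dev_stop(port_id);
	return 0;
}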
Code example #2
File: kni.c Project: Gandi/packet-journey
/* Initialize KNI subsystem */
void
init_kni(void)
{
	unsigned int num_of_kni_ports = 0, i;
	struct kni_port_params** params = kni_port_params_array;

	/* Calculate the maximum number of KNI interfaces that will be used */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (kni_port_params_array[i]) {
			RTE_LOG(INFO, KNI, "number of kni lcore %d\n",
				params[i]->nb_lcore_k);
			num_of_kni_ports +=
			    (params[i]->nb_lcore_k ? params[i]->nb_lcore_k : 1);
		}
	}

	RTE_LOG(INFO, KNI, "number of kni %d\n", num_of_kni_ports);
	/* Invoke rte KNI init to preallocate the ports */
	rte_kni_init(num_of_kni_ports);
	RTE_LOG(INFO, KNI, "finished init_kni\n");
}
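
Example #2 only preallocates the KNI contexts; the actual rte_kni_alloc() for each port happens elsewhere in the project. A minimal, hypothetical sketch of such a per-port allocation follows; the function name, mbuf size, and the decision to pass NULL ops are assumptions, not packet-journey's actual code.

/* Sketch only. Requires <rte_kni.h>; memset/snprintf from <string.h>/<stdio.h>. */
static struct rte_kni *
kni_alloc_port(uint8_t port_id, struct rte_mempool *pktmbuf_pool)
{
	struct rte_kni_conf conf;

	memset(&conf, 0, sizeof(conf));
	snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", (unsigned)port_id);
	conf.group_id = port_id;      /* ties the KNI to its eth port */
	conf.mbuf_size = 2048;        /* assumed mbuf data room size */

	/* NULL ops: MTU / link-state requests from the kernel are ignored */
	return rte_kni_alloc(pktmbuf_pool, &conf, NULL);
}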
Code example #3
/* Init KNI RX queue */
struct vr_dpdk_queue *
vr_dpdk_kni_rx_queue_init(unsigned lcore_id, struct vr_interface *vif,
    unsigned host_lcore_id)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    const unsigned socket_id = rte_lcore_to_socket_id(lcore_id);
    uint8_t port_id = 0;
    unsigned vif_idx = vif->vif_idx;
    struct vr_dpdk_queue *rx_queue = &lcore->lcore_rx_queues[vif_idx];
    struct vr_dpdk_queue_params *rx_queue_params
                    = &lcore->lcore_rx_queue_params[vif_idx];

    if (vif->vif_type == VIF_TYPE_HOST) {
        port_id = (((struct vr_dpdk_ethdev *)(vif->vif_bridge->vif_os))->
                ethdev_port_id);
    }

    /* init queue */
    rx_queue->rxq_ops = dpdk_knidev_reader_ops;
    rx_queue->q_queue_h = NULL;
    rx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);

    /* create the queue */
    struct dpdk_knidev_reader_params reader_params = {
        .kni = vif->vif_os,
    };
    rx_queue->q_queue_h = rx_queue->rxq_ops.f_create(&reader_params, socket_id);
    if (rx_queue->q_queue_h == NULL) {
        RTE_LOG(ERR, VROUTER, "    error creating KNI device %s RX queue"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return NULL;
    }

    /* store queue params */
    rx_queue_params->qp_release_op = &dpdk_kni_rx_queue_release;

    return rx_queue;
}

/* Release KNI TX queue */
static void
dpdk_kni_tx_queue_release(unsigned lcore_id, struct vr_interface *vif)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    struct vr_dpdk_queue *tx_queue = &lcore->lcore_tx_queues[vif->vif_idx];
    struct vr_dpdk_queue_params *tx_queue_params
                        = &lcore->lcore_tx_queue_params[vif->vif_idx];

    tx_queue->txq_ops.f_tx = NULL;
    rte_wmb();

    /* flush and free the queue */
    if (tx_queue->txq_ops.f_free(tx_queue->q_queue_h)) {
        RTE_LOG(ERR, VROUTER, "    error freeing lcore %u KNI device TX queue\n",
                    lcore_id);
    }

    /* reset the queue */
    vrouter_put_interface(tx_queue->q_vif);
    memset(tx_queue, 0, sizeof(*tx_queue));
    memset(tx_queue_params, 0, sizeof(*tx_queue_params));
}

/* Init KNI TX queue */
struct vr_dpdk_queue *
vr_dpdk_kni_tx_queue_init(unsigned lcore_id, struct vr_interface *vif,
    unsigned host_lcore_id)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    const unsigned socket_id = rte_lcore_to_socket_id(lcore_id);
    uint8_t port_id = 0;
    unsigned vif_idx = vif->vif_idx;
    struct vr_dpdk_queue *tx_queue = &lcore->lcore_tx_queues[vif_idx];
    struct vr_dpdk_queue_params *tx_queue_params
                    = &lcore->lcore_tx_queue_params[vif_idx];
    struct vr_dpdk_ethdev *ethdev;

    if (vif->vif_type == VIF_TYPE_HOST) {
        ethdev = vif->vif_bridge->vif_os;
        if (ethdev == NULL) {
            RTE_LOG(ERR, VROUTER, "    error creating KNI device %s TX queue:"
                " bridge vif %u ethdev is not initialized\n",
                vif->vif_name, vif->vif_bridge->vif_idx);
            return NULL;
        }
        port_id = ethdev->ethdev_port_id;
    }

    /* init queue */
    tx_queue->txq_ops = dpdk_knidev_writer_ops;
    tx_queue->q_queue_h = NULL;
    tx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);

    /* create the queue */
    struct dpdk_knidev_writer_params writer_params = {
        .kni = vif->vif_os,
        .tx_burst_sz = VR_DPDK_TX_BURST_SZ,
    };
    tx_queue->q_queue_h = tx_queue->txq_ops.f_create(&writer_params, socket_id);
    if (tx_queue->q_queue_h == NULL) {
        RTE_LOG(ERR, VROUTER, "    error creating KNI device %s TX queue"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return NULL;
    }

    /* store queue params */
    tx_queue_params->qp_release_op = &dpdk_kni_tx_queue_release;

    return tx_queue;
}

/* Change KNI MTU size callback */
static int
dpdk_knidev_change_mtu(uint8_t port_id, unsigned new_mtu)
{
    struct vrouter *router = vrouter_get(0);
    struct vr_interface *vif;
    int i, ret;
    uint8_t ethdev_port_id, slave_port_id;
    struct vr_dpdk_ethdev *ethdev = NULL;

    RTE_LOG(INFO, VROUTER, "Changing eth device %" PRIu8 " MTU to %u\n",
                    port_id, new_mtu);
    if (port_id >= rte_eth_dev_count()) {
        RTE_LOG(ERR, VROUTER, "Error changing eth device %"PRIu8" MTU: invalid eth device\n", port_id);
        return -EINVAL;
    }

    /*
     * TODO: DPDK bond PMD does not implement mtu_set op, so we need to
     * set the MTU manually for all the slaves.
     */
    /* Bond vif uses first slave port ID. */
    if (router->vr_eth_if) {
        ethdev = (struct vr_dpdk_ethdev *)router->vr_eth_if->vif_os;
        if (ethdev && ethdev->ethdev_nb_slaves > 0) {
            for (i = 0; i < ethdev->ethdev_nb_slaves; i++) {
                if (port_id == ethdev->ethdev_slaves[i])
                    break;
            }
            /* Clear ethdev if no port match. */
            if (i >= ethdev->ethdev_nb_slaves)
                ethdev = NULL;
        }
    }
    if (ethdev && ethdev->ethdev_nb_slaves > 0) {
        for (i = 0; i < ethdev->ethdev_nb_slaves; i++) {
            slave_port_id = ethdev->ethdev_slaves[i];
            RTE_LOG(INFO, VROUTER, "    changing bond member eth device %" PRIu8
                " MTU to %u\n", slave_port_id, new_mtu);

            ret = rte_eth_dev_set_mtu(slave_port_id, new_mtu);
            if (ret < 0) {
                RTE_LOG(ERR, VROUTER, "    error changing bond member eth device %" PRIu8
                    " MTU: %s (%d)\n", slave_port_id, rte_strerror(-ret), -ret);
                return ret;
            }
        }
    } else {
        ret = rte_eth_dev_set_mtu(port_id, new_mtu);
        if (ret < 0) {
            RTE_LOG(ERR, VROUTER, "Error changing eth device %" PRIu8
                " MTU: %s (%d)\n", port_id, rte_strerror(-ret), -ret);
        }
        return ret;
    }

    /* On success, inform vrouter about new MTU */
    for (i = 0; i < router->vr_max_interfaces; i++) {
        vif = __vrouter_get_interface(router, i);
        if (vif && (vif->vif_type == VIF_TYPE_PHYSICAL)) {
            ethdev_port_id = (((struct vr_dpdk_ethdev *)(vif->vif_os))->
                        ethdev_port_id);
            if (ethdev_port_id == port_id) {
                /* Ethernet header size */
                new_mtu += sizeof(struct vr_eth);
                if (vr_dpdk.vlan_tag != VLAN_ID_INVALID) {
                    /* 802.1q header size */
                    new_mtu += sizeof(uint32_t);
                }
                vif->vif_mtu = new_mtu;
                if (vif->vif_bridge)
                    vif->vif_bridge->vif_mtu = new_mtu;
            }
        }
    }

    return 0;
}


/* Configure KNI state callback */
static int
dpdk_knidev_config_network_if(uint8_t port_id, uint8_t if_up)
{
    int ret = 0;

    RTE_LOG(INFO, VROUTER, "Configuring eth device %" PRIu8 " %s\n",
                    port_id, if_up ? "UP" : "DOWN");
    if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
        RTE_LOG(ERR, VROUTER, "Invalid eth device %" PRIu8 "\n", port_id);
        return -EINVAL;
    }

    if (if_up)
        ret = rte_eth_dev_start(port_id);
    else
        rte_eth_dev_stop(port_id);

    if (ret < 0) {
        RTE_LOG(ERR, VROUTER, "Configuring eth device %" PRIu8 " UP"
                    "failed (%d)\n", port_id, ret);
    }

    return ret;
}

/* Init KNI */
int
vr_dpdk_knidev_init(uint8_t port_id, struct vr_interface *vif)
{
    int i;
    struct rte_eth_dev_info dev_info;
    struct rte_kni_conf kni_conf;
    struct rte_kni_ops kni_ops;
    struct rte_kni *kni;
    struct rte_config *rte_conf = rte_eal_get_configuration();

    if (!vr_dpdk.kni_inited) {
        /*
         * If the host does not support KNIs (e.g. RedHat), we'll get
         * a panic here.
         */
        rte_kni_init(VR_DPDK_MAX_KNI_INTERFACES);
        vr_dpdk.kni_inited = true;
    }

    /* get eth device info */
    memset(&dev_info, 0, sizeof(dev_info));
    rte_eth_dev_info_get(port_id, &dev_info);

    /* create KNI configuration */
    memset(&kni_conf, 0, sizeof(kni_conf));
    strncpy(kni_conf.name, (char *)vif->vif_name, sizeof(kni_conf.name) - 1);

    kni_conf.addr = dev_info.pci_dev->addr;
    kni_conf.id = dev_info.pci_dev->id;
    kni_conf.group_id = port_id;
    kni_conf.mbuf_size = VR_DPDK_MAX_PACKET_SZ;
    /*
     * Due to DPDK commit 41a6ebd, now to prevent packet reordering in KNI
     * we have to bind KNI kernel thread to a first online unused CPU.
     */
    for (i = 0; i < RTE_MAX_LCORE; i++) {
        if (lcore_config[i].detected
                && rte_conf->lcore_role[VR_DPDK_FWD_LCORE_ID + i] == ROLE_OFF) {
            kni_conf.force_bind = 1;
            kni_conf.core_id = i;
            RTE_LOG(INFO, VROUTER, "    bind KNI kernel thread to CPU %d\n", i);
            break;
        }
    }

    /* KNI options
     *
     * Changing state of the KNI interface can change state of the physical
     * interface. This is useful for the vhost, but not for the VLAN
     * forwarding interface.
     */
    if (vif->vif_type == VIF_TYPE_VLAN) {
        memset(&kni_ops, 0, sizeof(kni_ops));
    } else {
        kni_ops.port_id = port_id;
        kni_ops.change_mtu = dpdk_knidev_change_mtu;
        kni_ops.config_network_if = dpdk_knidev_config_network_if;
    }

    /* allocate KNI device */
    kni = rte_kni_alloc(vr_dpdk.rss_mempool, &kni_conf, &kni_ops);
    if (kni == NULL) {
        RTE_LOG(ERR, VROUTER, "    error allocation KNI device %s"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return -ENOMEM;
    }

    /* store pointer to KNI for further use */
    vif->vif_os = kni;

    /* add interface to the table of KNIs */
    for (i = 0; i < VR_DPDK_MAX_KNI_INTERFACES; i++) {
        if (vr_dpdk.knis[i] == NULL) {
            vr_dpdk.knis[i] = vif->vif_os;
            break;
        }
    }

    return 0;
}
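
None of the examples above show the datapath side. After rte_kni_alloc() succeeds, a KNI is typically serviced from a polling loop roughly like the sketch below; the burst size and single-queue assumption are mine, and the vRouter code in example #3 actually drives the KNI through its reader/writer port ops rather than calling these functions directly.

/* Sketch only. Requires <rte_ethdev.h>, <rte_kni.h>, <rte_mbuf.h>. */
#define KNI_PKT_BURST_SZ 32    /* assumed burst size */

static void
kni_service_once(uint8_t port_id, struct rte_kni *kni)
{
    struct rte_mbuf *pkts[KNI_PKT_BURST_SZ];
    unsigned nb_rx, nb_tx, i;

    /* eth port -> kernel: inject received packets into the KNI netdev */
    nb_rx = rte_eth_rx_burst(port_id, 0, pkts, KNI_PKT_BURST_SZ);
    nb_tx = rte_kni_tx_burst(kni, pkts, nb_rx);
    for (i = nb_tx; i < nb_rx; i++)
        rte_pktmbuf_free(pkts[i]);

    /* kernel -> eth port: transmit packets written by the kernel */
    nb_rx = rte_kni_rx_burst(kni, pkts, KNI_PKT_BURST_SZ);
    nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
    for (i = nb_tx; i < nb_rx; i++)
        rte_pktmbuf_free(pkts[i]);

    /* service change_mtu / config_network_if requests from the kernel */
    rte_kni_handle_request(kni);
}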
Code example #4
File: rw_piot_api.c Project: RIFTIO/RIFT.ware
/*
 * Initialize PIOT Module
 */
int
rw_piot_init(int argc, char **argv, void *instance_ptr, f_rw_piot_log_t log_fn)
{

  int rc, ret;
  int i, no_huge = 0, memory_req = 0;
  struct rte_config *cfg = rte_eal_get_configuration();

/*
 * TBD - thread safety
 */

  if (piot_initialized) {
    int num_arg;

    for (num_arg = 0; num_arg < argc; num_arg ++) {
      if (strcmp(argv[num_arg], "--") == 0) {
        break;
      } 
    }
    return num_arg;
  }
  piot_initialized = 1;


  memset(&rw_piot_global_config, 0, sizeof(rw_piot_global_config));
  ASSERT(instance_ptr);
  rw_piot_global_config.instance_ptr = instance_ptr;
  rw_piot_global_config.log_fn = log_fn;

  memset(&(rw_piot_lcore[0]), 0, sizeof(rw_piot_lcore));
  
  for (i=0; i<argc; i++) {
    if (strcmp("--no-huge", argv[i]) == 0) {
      no_huge = 1;
      RW_PIOT_LOG(RTE_LOG_INFO, "PIOT: Huge pages disabled by --no-huge cmdarg\n");
    }
    if (strcmp("-m", argv[i]) == 0) {
      if ((i+1) <argc) {
        memory_req = atoi(argv[i+1]);
        RW_PIOT_LOG(RTE_LOG_INFO, "PIOT: -m cmdarg setting requested memory to %d mb\n", memory_req);
      }
    }
    if (strcmp("--", argv[i]) == 0) {
      break;
    }
  }
  /*
   * setup system environment for DPDK
   */
  rc = dpdk_system_setup(no_huge, memory_req);
  if (rc < 0) {
    return rc;
  }

  rte_set_application_log_hook(rw_piot_log_handler);

  /*
   * Init DPDK EAL without doing thread related inits
   */
  ret = rte_eal_init_no_thread(argc, argv);
  if (ret < 0) {
    return ret;
  }
  if (geteuid() == 0){
    rte_kni_init(RWPIOT_MAX_KNI_DEVICES);
  }
 /*
  * Assign master lcore-id. Should be passed in the init - TBD
  */

  cfg->master_lcore = 0;  /* This will be fixed with RW.Sched integration - TBD */

  /* set the lcore ID in per-lcore memory area */
  RTE_PER_LCORE(_lcore_id) = cfg->master_lcore;

  /* set CPU affinity for master thread ??? TBD*/
  // if (eal_thread_set_affinity() < 0)
  //    rte_panic("cannot set affinity\n");

  rte_timer_subsystem_init();

  return ret; /* number of args consumed by rte_eal_init_no_thread */
}
Code example #5
File: kni_dev.c Project: labeneator/accel-ppp
int kni_dev_init(struct rte_mempool *mbuf_pool)
{
	struct conf_sect *s = conf_get_sect("interface");
	struct conf_opt *opt;
	struct rte_kni_conf conf;
	struct rte_kni_ops ops;
	struct rte_kni *kni;
	struct net_device *dev;
	struct knidev *knidev;
	pthread_t tid;
	struct ifconfig_arg arg;
	int i = 0, x = rte_eth_dev_count();

	for (opt = s->opt; opt; opt = opt->next) {
		const char *busid = conf_get_subopt(opt, "busid");
		if (!strcmp(busid, "kni"))
			kni_cnt++;
	}

	if (!kni_cnt)
		return 0;

	rte_kni_init(kni_cnt);

	dev_list = rte_malloc(NULL, kni_cnt * sizeof(void *), 0);

	memset(&conf, 0, sizeof(conf));
	memset(&ops, 0, sizeof(ops));

	ops.change_mtu = kni_change_mtu;
	ops.config_network_if = kni_config_network_if;

	for (opt = s->opt; opt; opt = opt->next) {
		const char *busid = conf_get_subopt(opt, "busid");
		if (strcmp(busid, "kni"))
			continue;

		snprintf(conf.name, sizeof(conf.name), "%s", opt->name);
		conf.group_id = i;
		conf.mbuf_size = ETHER_MAX_LEN + 8;

		ops.port_id = i;

		kni = rte_kni_alloc(mbuf_pool, &conf, &ops);

		if (!kni) {
			fprintf(stderr, "failed to create %s\n", opt->name);
			return -1;
		}

		dev = netdev_alloc(opt->name, sizeof(*knidev), NULL);
		dev->xmit = knidev_xmit;
		knidev = netdev_priv(dev);
		knidev->port = i;
		knidev->xport = i + x;
		knidev->dev = dev;
		knidev->kni = kni;

		dev_list[i] = knidev;

		arg.dev = knidev;
		arg.opt = opt;
		arg.err = -1;

		pthread_create(&tid, NULL, kni_ifconfig, &arg);

		while (arg.err == -1)
			rte_kni_handle_request(kni);

		pthread_join(tid, NULL);

		if (arg.err != 0)
			return -1;

		i++;	/* index of the next KNI port */
	}

	return 0;
}
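
The kni_ifconfig helper in example #5 runs in a separate thread because bringing a KNI interface up blocks inside the kernel until the main thread answers the resulting request via rte_kni_handle_request(). A hypothetical sketch of such a helper is shown below; the real accel-ppp function also applies the addresses from the configuration section, and only struct ifconfig_arg and opt->name come from the example above.

/* Sketch only. Requires <sys/ioctl.h>, <sys/socket.h>, <net/if.h>, <unistd.h>, <string.h>. */
static void *
kni_ifconfig(void *p)
{
	struct ifconfig_arg *arg = p;
	struct ifreq ifr;
	int fd, err = 1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd >= 0) {
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, arg->opt->name, sizeof(ifr.ifr_name) - 1);

		/* SIOCSIFFLAGS blocks until rte_kni_handle_request() answers */
		if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0) {
			ifr.ifr_flags |= IFF_UP;
			if (ioctl(fd, SIOCSIFFLAGS, &ifr) == 0)
				err = 0;
		}
		close(fd);
	}

	arg->err = err;	/* releases the busy-wait in kni_dev_init() */
	return NULL;
}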