Example 1
static struct vhost_block_dev *
vhost_scsi_bdev_construct(const char *bdev_name, const char *bdev_serial,
			  uint32_t blk_size, uint64_t blk_cnt,
			  bool wce_enable)
{
	struct vhost_block_dev *bdev;

	bdev = rte_zmalloc(NULL, sizeof(*bdev), RTE_CACHE_LINE_SIZE);
	if (!bdev)
		return NULL;

	/* snprintf guarantees NUL termination, unlike strncpy */
	snprintf(bdev->name, sizeof(bdev->name), "%s", bdev_name);
	snprintf(bdev->product_name, sizeof(bdev->product_name), "%s",
		 bdev_serial);
	bdev->blocklen = blk_size;
	bdev->blockcnt = blk_cnt;
	bdev->write_cache = wce_enable;

	/* use memory as the disk storage space */
	bdev->data = rte_zmalloc(NULL, blk_cnt * blk_size, 0);
	if (!bdev->data) {
		fprintf(stderr, "not enough reserved hugepage memory for disk\n");
		rte_free(bdev);
		return NULL;
	}

	return bdev;
}
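A matching destructor is not part of the listing. A minimal sketch, assuming nothing else holds references to the device (the name vhost_scsi_bdev_destruct is illustrative):

static void
vhost_scsi_bdev_destruct(struct vhost_block_dev *bdev)
{
	if (bdev == NULL)
		return;

	/* rte_free() tolerates NULL, so a partially built bdev is fine */
	rte_free(bdev->data);
	rte_free(bdev);
}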
Example 2
/*
 * LBA + metadata with no data-protection bits set; the test case uses a
 * separate metadata payload.
 */
static uint32_t dp_without_flags_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
		uint32_t *io_flags)
{
	uint32_t md_size, sector_size;

	req->lba_count = 16;

	/* separate metadata payload for the test case */
	if (spdk_nvme_ns_supports_extended_lba(ns))
		return 0;

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = rte_zmalloc(NULL, sector_size * req->lba_count, 0x1000);
	if (!req->contig)
		return 0;

	req->metadata = rte_zmalloc(NULL, md_size * req->lba_count, 0x1000);
	if (!req->metadata) {
		rte_free(req->contig);
		return 0;
	}

	req->lba = 0x600000;
	req->use_extended_lba = false;
	*io_flags = 0;

	return req->lba_count;
}
Example 3
/*
 * Do a single performance test, of one type of operation.
 *
 * @param h
 *   hash table to run test on
 * @param func
 *   function to call (add, delete or lookup function)
 * @param params
 *   parameters of the test (key length, bucket entries, iteration count, ...)
 * @param avg_occupancy
 *   Output: the average number of entries in each bucket of the hash table
 * @param invalid_pos_count
 *   Output: the number of errors (e.g. due to a full bucket).
 * @return
 *   The average number of ticks per hash function call. A negative number
 *   signifies failure.
 */
static double
run_single_tbl_perf_test(const struct rte_hash *h, hash_operation func,
		const struct tbl_perf_test_params *params, double *avg_occupancy,
		uint32_t *invalid_pos_count)
{
	uint64_t begin, end, ticks = 0;
	uint8_t *key = NULL;
	uint32_t *bucket_occupancies = NULL;
	uint32_t num_buckets, i, j;
	int32_t pos;

	/* Initialise */
	num_buckets = params->entries / params->bucket_entries;
	key = (uint8_t *) rte_zmalloc("hash key",
			params->key_len * sizeof(uint8_t), 16);
	if (key == NULL)
		return -1;

	bucket_occupancies = (uint32_t *) rte_zmalloc("bucket occupancies",
			num_buckets * sizeof(uint32_t), 16);
	if (bucket_occupancies == NULL) {
		rte_free(key);
		return -1;
	}

	ticks = 0;
	*invalid_pos_count = 0;

	for (i = 0; i < params->num_iterations; i++) {
		/* Prepare inputs for the current iteration */
		for (j = 0; j < params->key_len; j++)
			key[j] = (uint8_t) rte_rand();

		/* Perform operation, and measure time it takes */
		begin = rte_rdtsc();
		pos = func(h, key);
		end = rte_rdtsc();
		ticks += end - begin;

		/* Other work per iteration */
		if (pos < 0)
			*invalid_pos_count += 1;
		else
			bucket_occupancies[pos / params->bucket_entries]++;
	}
	*avg_occupancy = get_avg(bucket_occupancies, num_buckets);

	rte_free(bucket_occupancies);
	rte_free(key);

	return (double)ticks / params->num_iterations;
}
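The helper get_avg() is referenced but not shown; a minimal sketch consistent with its use above:

static double
get_avg(const uint32_t *vals, uint32_t n)
{
	uint64_t sum = 0;
	uint32_t i;

	if (n == 0)
		return 0;
	for (i = 0; i < n; i++)
		sum += vals[i];

	return (double)sum / n;
}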
Example 4
static void*
app_pipeline_fa_init(struct pipeline_params *params,
	__rte_unused void *arg)
{
	struct app_pipeline_fa *p;
	uint32_t size, i;

	/* Check input arguments */
	if ((params == NULL) ||
		(params->n_ports_in == 0) ||
		(params->n_ports_out == 0))
		return NULL;

	/* Memory allocation */
	size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_fa));
	p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (p == NULL)
		return NULL;

	/* Initialization */
	p->n_ports_in = params->n_ports_in;
	p->n_ports_out = params->n_ports_out;
	if (pipeline_fa_parse_args(&p->params, params)) {
		rte_free(p);
		return NULL;
	}

	/* Memory allocation */
	size = RTE_CACHE_LINE_ROUNDUP(
		p->params.n_flows * sizeof(struct app_pipeline_fa_flow));
	p->flows = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (p->flows == NULL) {
		rte_free(p);
		return NULL;
	}

	/* Initialization of flow table */
	for (i = 0; i < p->params.n_flows; i++)
		pipeline_fa_flow_params_set_default(&p->flows[i].params);

	/* Initialization of DSCP table */
	for (i = 0; i < RTE_DIM(p->dscp); i++) {
		p->dscp[i].traffic_class = 0;
		p->dscp[i].color = e_RTE_METER_GREEN;
	}

	return (void *) p;
}
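The teardown path is not shown; a minimal sketch that releases what app_pipeline_fa_init() allocated (the name and signature follow the init convention, but are assumptions here):

static int
app_pipeline_fa_free(void *pipeline)
{
	struct app_pipeline_fa *p = pipeline;

	if (p == NULL)
		return -1;

	rte_free(p->flows);
	rte_free(p);
	return 0;
}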
Example 5
static int
fs_sub_device_alloc(struct rte_eth_dev *dev,
		const char *params)
{
	uint8_t nb_subs;
	int ret;
	int i;

	ret = failsafe_args_count_subdevice(dev, params);
	if (ret)
		return ret;
	if (PRIV(dev)->subs_tail > FAILSAFE_MAX_ETHPORTS) {
		ERROR("Cannot allocate more than %d ports",
			FAILSAFE_MAX_ETHPORTS);
		return -ENOSPC;
	}
	nb_subs = PRIV(dev)->subs_tail;
	PRIV(dev)->subs = rte_zmalloc(NULL,
			sizeof(struct sub_device) * nb_subs,
			RTE_CACHE_LINE_SIZE);
	if (PRIV(dev)->subs == NULL) {
		ERROR("Could not allocate sub_devices");
		return -ENOMEM;
	}
	/* Initiate the static sub-devices circular linked list. */
	for (i = 1; i < nb_subs; i++)
		PRIV(dev)->subs[i - 1].next = PRIV(dev)->subs + i;
	if (nb_subs > 0)
		PRIV(dev)->subs[i - 1].next = PRIV(dev)->subs;
	return 0;
}
Example 6
/*
 * Invoked when there is a new vhost-user connection established (when
 * there is a new virtio device being attached).
 */
int
vhost_new_device(void)
{
	struct virtio_net *dev;
	int i;

	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
	if (dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for new dev.\n");
		return -1;
	}

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vhost_devices[i] == NULL)
			break;
	}
	if (i == MAX_VHOST_DEVICE) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to find a free slot for new device.\n");
		rte_free(dev);
		return -1;
	}

	vhost_devices[i] = dev;
	dev->vid = i;
	dev->slave_req_fd = -1;

	return i;
}
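The reverse path is not shown here. A minimal sketch of releasing the slot, consistent with the allocation above (a full implementation would also tear down virtqueues and notify the application):

void
vhost_destroy_device(int vid)
{
	struct virtio_net *dev;

	if (vid < 0 || vid >= MAX_VHOST_DEVICE)
		return;

	dev = vhost_devices[vid];
	if (dev == NULL)
		return;

	vhost_devices[vid] = NULL;
	rte_free(dev);
}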
Example 7
/*****************************************************************************
 * trace_init_component()
 ****************************************************************************/
static int trace_init_component(uint32_t trace_id)
{
    trace_comp_t *tc;
    uint32_t      i;

    if (trace_id >= TRACE_MAX)
        return -EINVAL;

    tc = &trace_components[trace_id];

    tc->tc_comp_id = trace_id;
    /* To be set later if needed (through an API). */
    tc->tc_fmt = NULL;

    tc->tc_buffers = rte_zmalloc("trace_buffer",
                                 rte_lcore_count() * sizeof(*tc->tc_buffers),
                                 0);
    if (!tc->tc_buffers)
        return -ENOMEM;

    for (i = 0; i < rte_lcore_count(); i++)
        TRACE_BUF_SET_LEVEL(&tc->tc_buffers[i], TRACE_LVL_LOG);

    return 0;
}
Example 8
int
rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	char device_name[MAX_VDPA_NAME_LEN];
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return -1;
	}

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i] == NULL)
			break;
	}

	sprintf(device_name, "vdpa-dev-%d", i);
	dev = rte_zmalloc(device_name, sizeof(struct rte_vdpa_device),
			RTE_CACHE_LINE_SIZE);
	if (!dev)
		return -1;

	memcpy(&dev->addr, addr, sizeof(struct rte_vdpa_dev_addr));
	dev->ops = ops;
	vdpa_devices[i] = dev;
	vdpa_device_num++;

	return i;
}
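For symmetry, a sketch of the reverse operation, derived from the structures above rather than taken from the original source:

int
rte_vdpa_unregister_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE || vdpa_devices[did] == NULL)
		return -1;

	rte_free(vdpa_devices[did]);
	vdpa_devices[did] = NULL;
	vdpa_device_num--;

	return did;
}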
Example 9
static void*
pipeline_init(__rte_unused struct pipeline_params *params, void *arg)
{
	struct app_params *app = (struct app_params *) arg;
	struct pipeline_master *p;
	uint32_t size;

	/* Check input arguments */
	if (app == NULL)
		return NULL;

	/* Memory allocation */
	size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_master));
	p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (p == NULL)
		return NULL;

	/* Initialization */
	p->app = app;

	p->cl = cmdline_stdin_new(app->cmds, "pipeline> ");
	if (p->cl == NULL) {
		rte_free(p);
		return NULL;
	}

	p->script_file_done = 0;
	if (app->script_file == NULL)
		p->script_file_done = 1;

	return (void *) p;
}
Example 10
static void*
app_pipeline_fc_init(struct pipeline_params *params,
	__rte_unused void *arg)
{
	struct app_pipeline_fc *p;
	uint32_t size, i;

	/* Check input arguments */
	if ((params == NULL) ||
		(params->n_ports_in == 0) ||
		(params->n_ports_out == 0))
		return NULL;

	/* Memory allocation */
	size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_fc));
	p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (p == NULL)
		return NULL;

	/* Initialization */
	p->n_ports_in = params->n_ports_in;
	p->n_ports_out = params->n_ports_out;

	for (i = 0; i < N_BUCKETS; i++)
		TAILQ_INIT(&p->flows[i]);
	p->n_flows = 0;

	return (void *) p;
}
Example 11
/*
 * Function is called from the CUSE open function. The device structure is
 * initialised and a new entry is added to the device configuration linked
 * list.
 */
int
vhost_new_device(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev;
	int i;

	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
	if (dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev.\n",
			ctx.fh);
		return -1;
	}

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vhost_devices[i] == NULL)
			break;
	}
	if (i == MAX_VHOST_DEVICE) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to find a free slot for new device.\n");
		rte_free(dev);
		return -1;
	}

	vhost_devices[i] = dev;
	dev->device_fh   = i;

	return i;
}
Example 12
/*
 * No protection information is transferred when PRACT is set to 1; both
 * the extended LBA format and separate metadata can run the test case.
 */
static uint32_t dp_with_pract_test(struct spdk_nvme_ns *ns, struct io_request *req,
				   uint32_t *io_flags)
{
	uint32_t sector_size;

	req->lba_count = 8;

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	/* No additional metadata buffer provided */
	req->contig = rte_zmalloc(NULL, sector_size * req->lba_count, 0x1000);
	if (!req->contig)
		return 0;

	switch (spdk_nvme_ns_get_pi_type(ns)) {
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
		*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRACT;
		break;
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
		*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRCHK_REFTAG |
			    SPDK_NVME_IO_FLAGS_PRACT;
		break;
	default:
		*io_flags = 0;
		break;
	}
	req->lba = 0x100000;
	req->use_extended_lba = false;
	req->metadata = NULL;

	return req->lba_count;
}
Example 13
static void
ns_attach(struct dev *device, int attachment_op, int ctrlr_id, int ns_id)
{
	int ret = 0;
	struct spdk_nvme_ctrlr_list *ctrlr_list;

	ctrlr_list = rte_zmalloc("nvme controller list", sizeof(struct spdk_nvme_ctrlr_list),
				 4096);
	if (ctrlr_list == NULL) {
		printf("Allocation error (controller list)\n");
		exit(1);
	}

	ctrlr_list->ctrlr_count = 1;
	ctrlr_list->ctrlr_list[0] = ctrlr_id;

	if (attachment_op == SPDK_NVME_NS_CTRLR_ATTACH) {
		ret = spdk_nvme_ctrlr_attach_ns(device->ctrlr, ns_id, ctrlr_list);
	} else if (attachment_op == SPDK_NVME_NS_CTRLR_DETACH) {
		ret = spdk_nvme_ctrlr_detach_ns(device->ctrlr, ns_id, ctrlr_list);
	}

	if (ret) {
		fprintf(stdout, "ns attach: Failed\n");
	}

	rte_free(ctrlr_list);
}
Example 14
// ATTENTION: the queue size must be at least one!
int mg_distribute_register_output(
  struct mg_distribute_config *cfg,
  uint16_t number,
  uint8_t port_id,
  uint16_t queue_id,
  uint16_t burst_size,
  uint64_t timeout
  ){
  if(number >= cfg->nr_outputs){
    printf("ERROR: invalid output number\n");
    return -EINVAL;
  }
  cfg->outputs[number].port_id = port_id;
  cfg->outputs[number].queue_id = queue_id;
  cfg->outputs[number].timeout = timeout;
  if(burst_size != 0){
    // Allocate a burst queue for the output, aligned to a cache line
    // (64 bytes here; RTE_CACHE_LINE_SIZE would avoid the magic number).
    struct mg_distribute_queue *queue = rte_zmalloc(NULL,
        sizeof(struct mg_distribute_queue)
        + burst_size * sizeof(struct rte_mbuf*), 64);
    if(!queue)
      return -ENOMEM;
    cfg->outputs[number].queue = queue;
    queue->size = burst_size;
  }
  // Mark the output valid only once it is fully set up.
  cfg->outputs[number].valid = 1;
  return 0;
}
Example 15
static void
register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	int nsid, num_ns;
	struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
	const struct spdk_nvme_ctrlr_data *cdata = spdk_nvme_ctrlr_get_data(ctrlr);

	if (entry == NULL) {
		perror("ctrlr_entry malloc");
		exit(1);
	}

	entry->latency_page = rte_zmalloc("nvme latency", sizeof(struct spdk_nvme_intel_rw_latency_page),
					  4096);
	if (entry->latency_page == NULL) {
		printf("Allocation error (latency page)\n");
		exit(1);
	}

	snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);

	entry->ctrlr = ctrlr;
	entry->next = g_controllers;
	g_controllers = entry;

	if (g_latency_tracking_enable &&
	    spdk_nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING))
		set_latency_tracking_feature(ctrlr, true);

	num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
	for (nsid = 1; nsid <= num_ns; nsid++) {
		register_ns(ctrlr, spdk_nvme_ctrlr_get_ns(ctrlr, nsid));
	}
}
Example 16
struct mg_bitmask * mg_bitmask_create(uint16_t size){
  uint16_t n_blocks = (size-1)/64 + 1;
  // header plus n_blocks 64-bit words of bit storage
  struct mg_bitmask *mask = rte_zmalloc(NULL, sizeof(struct mg_bitmask) + n_blocks * 8, 0);
  if(!mask)
    return NULL;
  mask->size = size;
  mask->n_blocks = n_blocks;
  return mask;
}
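Accessors are not included in the listing. A hypothetical setter, assuming the storage allocated past the struct is exposed as a flexible uint64_t array member (called mask here; the listing does not confirm that name):

// Assumed layout: struct mg_bitmask { uint16_t size; uint16_t n_blocks; uint64_t mask[]; };
static inline void mg_bitmask_set_bit(struct mg_bitmask *m, uint16_t n){
  m->mask[n / 64] |= 1ULL << (n % 64);
}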
Example 17
/* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT set to 0 */
static uint32_t dp_without_pract_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
		uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 2;

	switch (spdk_nvme_ns_get_pi_type(ns)) {
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
		return 0;
	default:
		break;
	}

	/* separate metadata payload for the test case */
	if (spdk_nvme_ns_supports_extended_lba(ns))
		return 0;

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = rte_zmalloc(NULL, sector_size * req->lba_count, 0x1000);
	if (!req->contig)
		return 0;

	req->metadata = rte_zmalloc(NULL, md_size * req->lba_count, 0x1000);
	if (!req->metadata) {
		rte_free(req->contig);
		return 0;
	}

	req->lba = 0x400000;
	req->use_extended_lba = false;

	/* PI lives in the last 8 bytes when the metadata size is larger than 8 */
	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
	/* the reference tag is stored big-endian */
	pi->ref_tag = swap32((uint32_t)req->lba);

	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size * 2 - 8);
	/* the reference tag is incremented for each subsequent logical block */
	pi->ref_tag = swap32((uint32_t)req->lba + 1);

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;

	return req->lba_count;
}
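swap16()/swap32() are defined elsewhere in the test; a minimal sketch of byte-swap helpers matching their use above:

static inline uint16_t
swap16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

static inline uint32_t
swap32(uint32_t v)
{
	return ((v >> 24) & 0x000000FFu) | ((v >> 8) & 0x0000FF00u) |
	       ((v << 8) & 0x00FF0000u) | ((v << 24) & 0xFF000000u);
}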
Example 18
static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
		/* RMW possibly needed for first or last words.
		 */
		buf = rte_zmalloc(NULL, aligned_len, 0);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
			   eeprom->length);
	} else {
		buf = eeprom->data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != eeprom->data)
		rte_free(buf);
	return err;
}
Example 19
int odp_kni_config(struct odp_user_config * common_config, struct rte_mempool * pktmbuf_pool[])
{
	uint32_t portmask    = common_config->port_mask;
	unsigned lcore_item  = 0;

	// Link the mbuf pools provided by outside modules.
	odp_pktmbuf_pool = pktmbuf_pool;

	// Bind parameters between lcores and KNI.
	for(unsigned port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
	{
		if((portmask & (1 << port_id)) == 0)
			continue;

		assert(kni_port_params_array[port_id] == NULL);

		// Assign lcores round-robin; wrap around once all params are used.
		if(lcore_item >= common_config->lcore_param_nb)
			lcore_item = 0;
		unsigned lcore_id = common_config->lcore_param[lcore_item++].lcore_id;

		kni_port_params_array[port_id] = rte_zmalloc(NULL, sizeof(struct kni_port_params), 0);
		if(kni_port_params_array[port_id] == NULL)
			return -ENOMEM;
		kni_port_params_array[port_id]->port_id  = port_id;
		kni_port_params_array[port_id]->lcore_id = lcore_id;

		printf("DEBUG: port_id=%u, lcore_id=%u\n", port_id, lcore_id);
	}

	for(int i = 0; i < RTE_MAX_ETHPORTS; i++)
	{
		if(kni_port_params_array[i] == NULL)
			continue;
		
		struct kni_port_params * d  = kni_port_params_array[i];

		if(kni_lcore_params_array[d->lcore_id] == NULL)
		{
			kni_lcore_params_array[d->lcore_id] = rte_zmalloc(NULL, sizeof(struct kni_lcore_params), 0);
			if(kni_lcore_params_array[d->lcore_id] == NULL)
				return -ENOMEM;
		}

		unsigned nb_ports = kni_lcore_params_array[d->lcore_id]->nb_ports;
		kni_lcore_params_array[d->lcore_id]->port[nb_ports] = d;
		kni_lcore_params_array[d->lcore_id]->nb_ports++;
	}

	return 0;
}
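A hypothetical cleanup counterpart (not part of the original module), releasing everything odp_kni_config() allocated; RTE_MAX_LCORE is assumed to bound kni_lcore_params_array:

void odp_kni_release(void)
{
	for(int i = 0; i < RTE_MAX_ETHPORTS; i++)
	{
		rte_free(kni_port_params_array[i]);
		kni_port_params_array[i] = NULL;
	}

	for(int i = 0; i < RTE_MAX_LCORE; i++)
	{
		rte_free(kni_lcore_params_array[i]);
		kni_lcore_params_array[i] = NULL;
	}
}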
Example 20
static int
cn23xx_vf_setup_mbox(struct lio_device *lio_dev)
{
	struct lio_mbox *mbox;

	PMD_INIT_FUNC_TRACE();

	if (lio_dev->mbox == NULL) {
		lio_dev->mbox = rte_zmalloc(NULL, sizeof(void *), 0);
		if (lio_dev->mbox == NULL)
			return -ENOMEM;
	}

	mbox = rte_zmalloc(NULL, sizeof(struct lio_mbox), 0);
	if (mbox == NULL) {
		rte_free(lio_dev->mbox);
		lio_dev->mbox = NULL;
		return -ENOMEM;
	}

	rte_spinlock_init(&mbox->lock);

	mbox->lio_dev = lio_dev;

	mbox->q_no = 0;

	mbox->state = LIO_MBOX_STATE_IDLE;

	/* VF mbox interrupt reg */
	mbox->mbox_int_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_VF_SLI_PKT_MBOX_INT(0);
	/* VF reads from SIG0 reg */
	mbox->mbox_read_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
	/* VF writes into SIG1 reg */
	mbox->mbox_write_reg = (uint8_t *)lio_dev->hw_addr +
				CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);

	lio_dev->mbox[0] = mbox;

	rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);

	return 0;
}
Example 21
struct malloc_disk *create_malloc_disk(uint64_t num_blocks, uint32_t block_size)
{
	struct malloc_disk	*mdisk;

	if (block_size % 512 != 0) {
		SPDK_ERRLOG("Block size %u is not a multiple of 512.\n", block_size);
		return NULL;
	}

	if (num_blocks == 0) {
		SPDK_ERRLOG("Disk must be more than 0 blocks\n");
		return NULL;
	}

	mdisk = rte_malloc(NULL, sizeof(*mdisk), 0);
	if (!mdisk) {
		perror("mdisk");
		return NULL;
	}

	memset(mdisk, 0, sizeof(*mdisk));

	/*
	 * Allocate the large backend memory buffer using rte_malloc(),
	 *  so that we guarantee it is allocated from hugepage memory.
	 *
	 * TODO: need to pass a hint so we know which socket to allocate
	 *  from on multi-socket systems.
	 */
	mdisk->malloc_buf = rte_zmalloc(NULL, num_blocks * block_size, 2 * 1024 * 1024);
	if (!mdisk->malloc_buf) {
		SPDK_ERRLOG("rte_zmalloc failed\n");
		rte_free(mdisk);
		return NULL;
	}

	snprintf(mdisk->disk.name, SPDK_BDEV_MAX_NAME_LENGTH, "Malloc%d", malloc_disk_count);
	snprintf(mdisk->disk.product_name, SPDK_BDEV_MAX_PRODUCT_NAME_LENGTH, "Malloc disk");
	malloc_disk_count++;

	mdisk->disk.write_cache = 1;
	mdisk->disk.blocklen = block_size;
	mdisk->disk.blockcnt = num_blocks;
	mdisk->disk.thin_provisioning = 1;
	mdisk->disk.max_unmap_bdesc_count = MALLOC_MAX_UNMAP_BDESC;

	mdisk->disk.ctxt = mdisk;
	mdisk->disk.fn_table = &malloc_fn_table;

	spdk_bdev_register(&mdisk->disk);

	mdisk->next = g_malloc_disk_head;
	g_malloc_disk_head = mdisk;

	return mdisk;
}
Example 22
/* create the ring */
struct rte_ring *
rte_ring_create(const char *name, unsigned count, int socket_id,
		unsigned flags)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	struct rte_ring *r;
	struct rte_tailq_entry *te;
	const struct rte_memzone *mz;
	ssize_t ring_size;
	int mz_flags = 0;
	struct rte_ring_list* ring_list = NULL;

	ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);

	ring_size = rte_ring_get_memsize(count);
	if (ring_size < 0) {
		rte_errno = ring_size;
		return NULL;
	}

	te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, RING, "Cannot reserve memory for tailq\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	snprintf(mz_name, sizeof(mz_name), "%s%s", RTE_RING_MZ_PREFIX, name);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* reserve a memory zone for this ring. If we can't get rte_config or
	 * we are a secondary process, the memzone_reserve function will set
	 * rte_errno for us appropriately - hence no check in this function */
	mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
	if (mz != NULL) {
		r = mz->addr;
		/* no need to check return value here, we already checked the
		 * arguments above */
		rte_ring_init(r, name, count, flags);

		te->data = (void *) r;

		TAILQ_INSERT_TAIL(ring_list, te, next);
	} else {
		r = NULL;
		RTE_LOG(ERR, RING, "Cannot reserve memory\n");
		rte_free(te);
	}
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return r;
}
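A brief usage sketch (illustrative values; with these flags the ring is single-producer/single-consumer, and the count must be a power of two):

static int ring_smoke_test(void)
{
	int payload = 42;
	void *obj = NULL;
	struct rte_ring *r = rte_ring_create("example_ring", 1024,
			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);

	if (r == NULL)
		return -1;
	if (rte_ring_enqueue(r, &payload) != 0 ||
	    rte_ring_dequeue(r, &obj) != 0)
		return -1;

	return *(int *)obj == 42 ? 0 : -1;
}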
Example 23
int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret, i;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();

	/**
	 * return if SRIOV not enabled, VF number not configured or
	 * no queue assigned.
	 */
	if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 || pf->vf_nb_qps == 0)
		return I40E_SUCCESS;

	/* Allocate memory to store the VF structures */
	pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
	if (pf->vfs == NULL)
		return -ENOMEM;

	/* Disable irq0 for VFR event */
	i40e_pf_disable_irq0(hw);

	/* Disable VF link status interrupt */
	val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
	val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < pf->vf_num; i++) {
		pf->vfs[i].pf = pf;
		pf->vfs[i].state = I40E_VF_INACTIVE;
		pf->vfs[i].vf_idx = i;
		ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
		if (ret != I40E_SUCCESS)
			goto fail;
		eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
	}

	/* restore irq0 */
	i40e_pf_enable_irq0(hw);

	return I40E_SUCCESS;

fail:
	rte_free(pf->vfs);
	i40e_pf_enable_irq0(hw);

	return ret;
}
Example 24
struct mg_distribute_config * mg_distribute_create(
  uint16_t entry_offset,
  uint16_t nr_outputs,
  uint8_t always_flush
  ){
  struct mg_distribute_config *cfg = rte_zmalloc(NULL, sizeof(struct mg_distribute_config) + nr_outputs * sizeof(struct mg_distribute_output), 0);
  if(cfg){
    cfg->nr_outputs = nr_outputs;
    cfg->always_flush = always_flush;
    cfg->entry_offset = entry_offset;
  }
  return cfg;
}
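Example wiring of the two functions above (values illustrative): one distribute stage with two outputs, each with a 32-packet burst queue and the same timeout:

struct mg_distribute_config *cfg = mg_distribute_create(0, 2, 0);
if(cfg){
  mg_distribute_register_output(cfg, 0, 0, 0, 32, 100);
  mg_distribute_register_output(cfg, 1, 1, 0, 32, 100);
}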
Example 25
/*
 * Initialize driver
 * It returns 0 on success.
 */
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = NULL;
	char name[RTE_ETH_NAME_MAX_LEN];
	int err = 0;

	CXGBE_FUNC_TRACE();

	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = eth_dev->pci_dev;

	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
	if (!adapter)
		return -1;

	adapter->use_unpacked_mode = 1;
	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
	if (!adapter->regs) {
		dev_err(adapter, "%s: cannot map device registers\n", __func__);
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->pdev = pci_dev;
	adapter->eth_dev = eth_dev;
	pi->adapter = adapter;

	err = cxgbe_probe(adapter);
	if (err) {
		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
			__func__, err);
		goto out_free_adapter;
	}

	return 0;

out_free_adapter:
	rte_free(adapter);
	return err;
}
Example 26
static int
reservation_ns_report(struct nvme_controller *ctrlr, uint16_t ns_id)
{
	int ret, i;
	uint8_t *payload;
	struct nvme_reservation_status_data *status;
	struct nvme_reservation_controller_data *cdata;
	struct nvme_namespace *ns;

	ns = nvme_ctrlr_get_ns(ctrlr, ns_id);
	payload = rte_zmalloc(NULL, 0x1000, 0x1000);
	if (payload == NULL)
		return -1;

	outstanding_commands = 0;
	reserve_command_result = -1;

	ret = nvme_ns_cmd_reservation_report(ns, payload, 0x1000,
					     reservation_ns_completion, NULL);
	if (ret) {
		fprintf(stderr, "Reservation Report Failed\n");
		rte_free(payload);
		return -1;
	}

	outstanding_commands++;
	while (outstanding_commands) {
		nvme_ctrlr_process_io_completions(ctrlr, 100);
	}

	if (reserve_command_result) {
		fprintf(stderr, "Reservation Report Failed\n");
		rte_free(payload);
		return 0;
	}

	status = (struct nvme_reservation_status_data *)payload;
	fprintf(stdout, "Reservation Generation Counter                  %u\n", status->generation);
	fprintf(stdout, "Reservation type                                %u\n", status->type);
	fprintf(stdout, "Reservation Number of Registered Controllers    %u\n", status->nr_regctl);
	fprintf(stdout, "Reservation Persist Through Power Loss State    %u\n", status->ptpl_state);
	for (i = 0; i < status->nr_regctl; i++) {
		cdata = (struct nvme_reservation_controller_data *)(payload + sizeof(struct
				nvme_reservation_status_data) * (i + 1));
		fprintf(stdout, "Controller ID                           %u\n", cdata->ctrlr_id);
		fprintf(stdout, "Controller Reservation Status           %u\n", cdata->rcsts.status);
		fprintf(stdout, "Controller Host ID                      0x%"PRIx64"\n", cdata->host_id);
		fprintf(stdout, "Controller Reservation Key              0x%"PRIx64"\n", cdata->key);
	}

	rte_free(payload);
	return 0;
}
Example 27
/* Application Tag checked with PRACT set to 0 */
static uint32_t dp_without_pract_separate_meta_apptag_test(struct spdk_nvme_ns *ns,
		struct io_request *req,
		uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 1;

	/* separate metadata payload for the test case */
	if (spdk_nvme_ns_supports_extended_lba(ns))
		return 0;

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = rte_zmalloc(NULL, sector_size * req->lba_count, 0x1000);
	if (!req->contig)
		return 0;

	req->metadata = rte_zmalloc(NULL, md_size * req->lba_count, 0x1000);
	if (!req->metadata) {
		rte_free(req->contig);
		return 0;
	}

	req->lba = 0x500000;
	req->use_extended_lba = false;
	req->apptag_mask = 0xFFFF;
	req->apptag = req->lba_count;

	/* PI lives in the last 8 bytes when the metadata size is larger than 8 */
	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
	pi->app_tag = swap16(req->lba_count);

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_APPTAG;

	return req->lba_count;
}
Example 28
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
	struct virtchnl_vf_resource *vf_res = NULL;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint32_t len = 0;
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf,
					    VIRTCHNL_OP_GET_VF_RESOURCES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	/* only have 1 VSI by default */
	len = sizeof(struct virtchnl_vf_resource) +
	      I40E_DEFAULT_VF_VSI_NUM *
	      sizeof(struct virtchnl_vsi_resource);

	vf_res = rte_zmalloc("i40e_vf_res", len, 0);
	if (vf_res == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate mem");
		ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto send_msg;
	}

	vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
				VIRTCHNL_VF_OFFLOAD_VLAN;
	vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
	vf_res->num_queue_pairs = vf->vsi->nb_qps;
	vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;

	/* Change below setting if PF host can support more VSIs for VF */
	vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
	vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
	ether_addr_copy(&vf->mac_addr,
		(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
					ret, (uint8_t *)vf_res, len);
	rte_free(vf_res);

	return ret;
}
Example 29
static int qed_load_firmware_data(struct ecore_dev *edev)
{
	int fd;
	struct stat st;
	const char *fw = RTE_LIBRTE_QEDE_FW;

	if (strcmp(fw, "") == 0)
		strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
	else
		strcpy(fw_file, fw);

	fd = open(fw_file, O_RDONLY);
	if (fd < 0) {
		DP_ERR(edev, "Can't open firmware file\n");
		return -ENOENT;
	}

	if (fstat(fd, &st) < 0) {
		DP_ERR(edev, "Can't stat firmware file\n");
		close(fd);
		return -1;
	}

	edev->firmware = rte_zmalloc("qede_fw", st.st_size,
				    RTE_CACHE_LINE_SIZE);
	if (!edev->firmware) {
		DP_ERR(edev, "Can't allocate memory for firmware\n");
		close(fd);
		return -ENOMEM;
	}

	if (read(fd, edev->firmware, st.st_size) != st.st_size) {
		DP_ERR(edev, "Can't read firmware data\n");
		close(fd);
		return -1;
	}

	edev->fw_len = st.st_size;
	if (edev->fw_len < 104) {
		DP_ERR(edev, "Invalid fw size: %" PRIu64 "\n",
			  edev->fw_len);
		close(fd);
		return -EINVAL;
	}

	close(fd);
	return 0;
}
Example 30
struct rte_keepalive *
rte_keepalive_create(rte_keepalive_failure_callback_t callback,
	void *data)
{
	struct rte_keepalive *keepcfg;

	keepcfg = rte_zmalloc("RTE_EAL_KEEPALIVE",
		sizeof(struct rte_keepalive),
		RTE_CACHE_LINE_SIZE);
	if (keepcfg != NULL) {
		keepcfg->callback = callback;
		keepcfg->callback_data = data;
		keepcfg->tsc_initial = rte_rdtsc();
		keepcfg->tsc_mhz = rte_get_tsc_hz() / 1000;
	}
	return keepcfg;
}
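A usage sketch, assuming the usual DPDK companion calls (rte_keepalive_register_core(), rte_keepalive_mark_alive(), rte_keepalive_dispatch_pings()) are available:

static void
core_died(void *data, const int id_core)
{
	(void)data;
	printf("keepalive: lcore %d missed its deadline\n", id_core);
}

static struct rte_keepalive *
setup_keepalive(int worker_lcore_id)
{
	struct rte_keepalive *keepcfg = rte_keepalive_create(core_died, NULL);

	if (keepcfg != NULL)
		rte_keepalive_register_core(keepcfg, worker_lcore_id);

	/* The worker calls rte_keepalive_mark_alive(keepcfg) in its loop;
	 * the monitor calls rte_keepalive_dispatch_pings(NULL, keepcfg)
	 * periodically, e.g. from an rte_timer. */
	return keepcfg;
}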