Example #1
/**
 * Real entry point run in the slave process
 */
static int
slave_proc_func(void)
{
	struct rte_config *config;
	unsigned slave_id = rte_lcore_id();
	struct lcore_stat *cfg = &core_cfg[slave_id];

	if (prctl(PR_SET_PDEATHSIG, SIG_PARENT_EXIT, 0, 0, 0, 0) != 0)
		printf("Warning: Slave can't register for being notified in "
		       "case master process exited\n");
	else {
		struct sigaction act;
		memset(&act, 0, sizeof(act));
		act.sa_handler = sighand_parent_exit;
		if (sigaction(SIG_PARENT_EXIT, &act, NULL) != 0)
			printf("Fail to register signal handler:%d\n", SIG_PARENT_EXIT);
	}

	/* Set slave process to SECONDARY to avoid operation like dev_start/stop etc */
	config = rte_eal_get_configuration();
	if (NULL == config)
		printf("Warning:Can't get rte_config\n");
	else
		config->process_type = RTE_PROC_SECONDARY;

	printf("Core %u is ready (pid=%d)\n", slave_id, (int)cfg->pid);

	exit(cfg->f(cfg->arg));
}
Example #2
/* Dump all reserved memory zones on console */
void
rte_memzone_dump(FILE *f)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	/* dump all zones */
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr == NULL)
			break;
		fprintf(f, "Zone %u: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
		       ", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
		       mcfg->memzone[i].name,
		       mcfg->memzone[i].phys_addr,
		       mcfg->memzone[i].len,
		       mcfg->memzone[i].addr,
		       mcfg->memzone[i].socket_id,
		       mcfg->memzone[i].flags);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
Example #3
/**
 * Calculate the MFN (machine frame number) from a physical address in Xen Dom0.
 */
phys_addr_t
rte_xen_mem_phy2mch(int32_t memseg_id, const phys_addr_t phy_addr)
{
	int mfn_id, i;
	uint64_t mfn, mfn_offset;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg *memseg = mcfg->memseg;

	/* find the memory segment owning the physical address */
	if (memseg_id == -1) {
		for (i = 0; i < RTE_MAX_MEMSEG; i++) {
			if ((phy_addr >= memseg[i].phys_addr) &&
					(phy_addr < memseg[i].phys_addr +
						memseg[i].len)) {
				memseg_id = i;
				break;
			}
		}
		if (memseg_id == -1)
			return RTE_BAD_PHYS_ADDR;
	}

	mfn_id = (phy_addr - memseg[memseg_id].phys_addr) / RTE_PGSIZE_2M;

	/* the MFN is contiguous within a 2M page */
	mfn_offset = (phy_addr - memseg[memseg_id].phys_addr) %
					RTE_PGSIZE_2M / PAGE_SIZE;
	mfn = mfn_offset + memseg[memseg_id].mfn[mfn_id];

	/* return machine address */
	return mfn * PAGE_SIZE + phy_addr % PAGE_SIZE;
}
Example #4
/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;
	const struct rte_memseg *memseg;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* secondary processes don't need to initialise anything */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	memseg = rte_eal_get_physmem_layout();
	if (memseg == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
		return -1;
	}

	rte_rwlock_write_lock(&mcfg->mlock);

	/* delete all zones */
	mcfg->memzone_cnt = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));

	rte_rwlock_write_unlock(&mcfg->mlock);

	return rte_eal_malloc_heap_init();
}
Example #5
/* Return the length of the greatest free block in the specified heap. If no
 * heap has been specified, search all heaps and return the length of the
 * greatest free block, storing the owning heap's socket id in *s. */
static size_t
find_heap_max_free_elem(int *s, unsigned align)
{
	struct rte_mem_config *mcfg;
	struct rte_malloc_socket_stats stats;
	int i, socket = *s;
	size_t len = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
		if ((socket != SOCKET_ID_ANY) && (socket != i))
			continue;

		malloc_heap_get_stats(&mcfg->malloc_heaps[i], &stats);
		if (stats.greatest_free_size > len) {
			len = stats.greatest_free_size;
			*s = i;
		}
	}

	if (len < MALLOC_ELEM_OVERHEAD + align)
		return 0;

	return len - MALLOC_ELEM_OVERHEAD - align;
}
Example #6
/* Dump the physical memory layout on console */
void
rte_dump_physmem_layout(FILE *f)
{
	const struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (mcfg->memseg[i].addr == NULL)
			break;

		fprintf(f, "Segment %u: phys:0x%"PRIx64", len:%zu, "
		       "virt:%p, socket_id:%"PRId32", "
		       "hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
		       "nrank:%"PRIx32"\n", i,
		       mcfg->memseg[i].phys_addr,
		       mcfg->memseg[i].len,
		       mcfg->memseg[i].addr,
		       mcfg->memseg[i].socket_id,
		       mcfg->memseg[i].hugepage_sz,
		       mcfg->memseg[i].nchannel,
		       mcfg->memseg[i].nrank);
	}
}
Example #7
/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment and boundary).
 * If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_bounded(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* mutually exclusive hugepage size flags cannot both be requested */
	if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
		|| ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, bound);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}
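A minimal caller sketch for the bounded reservation above, assuming standard DPDK headers; the zone name, sizes, and the helper name reserve_bounded_example are illustrative and not part of the original example:

#include <stdio.h>

#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_memzone.h>

/* Hypothetical helper: reserve a 1 MB zone, aligned to 4 KB, that must not
 * cross a 2 MB boundary; rte_errno reports the failure reason. */
static const struct rte_memzone *
reserve_bounded_example(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_bounded("example_bounded_zone", 1 << 20,
			SOCKET_ID_ANY, 0, 1 << 12, RTE_PGSIZE_2M);
	if (mz == NULL)
		printf("reservation failed: %d\n", rte_errno);
	return mz;
}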
Example #8
int
rte_memzone_free(const struct rte_memzone *mz)
{
	struct rte_mem_config *mcfg;
	int ret = 0;
	void *addr;
	unsigned idx;

	if (mz == NULL)
		return -EINVAL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
	idx = idx / sizeof(struct rte_memzone);

	addr = mcfg->memzone[idx].addr;
	if (addr == NULL)
		ret = -EINVAL;
	else if (mcfg->memzone_cnt == 0) {
		rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
				__func__);
	} else {
		memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
		mcfg->memzone_cnt--;
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	rte_free(addr);

	return ret;
}
Example #9
static int
test_memzone_reserve_memory_in_smallest_segment(void)
{
	const struct rte_memzone *mz;
	const struct rte_memseg *ms, *min_ms, *prev_min_ms;
	size_t min_len, prev_min_len;
	const struct rte_config *config;
	int i;

	config = rte_eal_get_configuration();

	min_ms = NULL;  /*< smallest segment */
	prev_min_ms = NULL; /*< second smallest segment */

	/* find two smallest segments */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		ms = &config->mem_config->free_memseg[i];

		if (ms->addr == NULL)
			break;
		if (ms->len == 0)
			continue;

		if (min_ms == NULL)
			min_ms = ms;
		else if (min_ms->len > ms->len) {
			/* set last smallest to second last */
			prev_min_ms = min_ms;

			/* set new smallest */
			min_ms = ms;
		} else if ((prev_min_ms == NULL)
			|| (prev_min_ms->len > ms->len))
			prev_min_ms = ms;
	}

	if (min_ms == NULL || prev_min_ms == NULL) {
		printf("Smallest segments not found!\n");
		return -1;
	}

	min_len = min_ms->len;
	prev_min_len = prev_min_ms->len;

	/* try reserving a memzone in the smallest memseg */
	mz = rte_memzone_reserve("smallest_mz", RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY, 0);
	if (mz == NULL) {
		printf("Failed to reserve memory from smallest memseg!\n");
		return -1;
	}
	if (prev_min_ms->len != prev_min_len &&
			min_ms->len != min_len - RTE_CACHE_LINE_SIZE) {
		printf("Reserved memory from wrong memseg!\n");
		return -1;
	}

	return 0;
}
Example #10
static int
rte_eal_contigmem_attach(void)
{
	const struct hugepage_info *hpi;
	int fd_hugepage_info, fd_hugepage = -1;
	unsigned i = 0;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    
	/* Obtain a file descriptor for hugepage_info */
	fd_hugepage_info = open(eal_hugepage_info_path(), O_RDONLY);
	if (fd_hugepage_info < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
		return -1;
	}

	/* Map the shared hugepage_info into the process address space */
	hpi = mmap(NULL, sizeof(struct hugepage_info), PROT_READ, MAP_PRIVATE,
			fd_hugepage_info, 0);
	if (hpi == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
		goto error;
	}

	/* Obtain a file descriptor for contiguous memory */
	fd_hugepage = open(hpi->hugedir, O_RDWR);
	if (fd_hugepage < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s\n", hpi->hugedir);
		goto error;
	}

	/* Map the contiguous memory into each memory segment */
	for (i = 0; i < hpi->num_pages[0]; i++) {

		void *addr;
		struct rte_memseg *seg = &mcfg->memseg[i];

		addr = mmap(seg->addr, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
				MAP_SHARED|MAP_FIXED, fd_hugepage, i * PAGE_SIZE);
		if (addr == MAP_FAILED || addr != seg->addr) {
			RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
				i, hpi->hugedir);
			goto error;
		}
        
	}

	/* hugepage_info is no longer required */
	munmap((void *)(uintptr_t)hpi, sizeof(struct hugepage_info));
	close(fd_hugepage_info);
	close(fd_hugepage);
	return 0;

error:
	if (fd_hugepage_info >= 0)
		close(fd_hugepage_info);
	if (fd_hugepage >= 0)
		close(fd_hugepage);
	return -1;
}
Example #11
static int
eal_parse_coremask(const char *coremask)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	int i, j, idx = 0;
	unsigned count = 0;
	char c;
	int val;

	if (coremask == NULL)
		return -1;
	/* Skip leading and trailing blank characters.
	 * Remove the 0x/0X prefix if it exists.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
		||  (coremask[1] == 'X')) )
		coremask += 2;
	i = strnlen(coremask, PATH_MAX);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;
	if (i == 0)
		return -1;

	for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			return -1;
		}
		val = xdigit2val(c);
		for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; j++, idx++) {
			if ((1 << j) & val) {
				if (!lcore_config[idx].detected) {
					RTE_LOG(ERR, EAL, "lcore %u "
					        "unavailable\n", idx);
					return -1;
				}
				cfg->lcore_role[idx] = ROLE_RTE;
				if (count == 0)
					cfg->master_lcore = idx;
				count++;
			} else  {
				cfg->lcore_role[idx] = ROLE_OFF;
			}
		}
	}
	for (; i >= 0; i--)
		if (coremask[i] != '0')
			return -1;
	for (; idx < RTE_MAX_LCORE; idx++)
		cfg->lcore_role[idx] = ROLE_OFF;
	if (count == 0)
		return -1;
	/* Update the count of enabled logical cores of the EAL configuration */
	cfg->lcore_count = count;
	return 0;
}
Example #12
/*
 * Parse /sys/devices/system/cpu to get the number of physical and logical
 * processors on the machine. The function will fill the cpu_info
 * structure.
 */
int
rte_eal_cpu_init(void)
{
	/* pointer to global configuration */
	struct rte_config *config = rte_eal_get_configuration();
	unsigned lcore_id;
	unsigned count = 0;

	/*
	 * Parse the maximum set of logical cores, detect the subset of running
	 * ones and enable them by default.
	 */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		lcore_config[lcore_id].core_index = count;

		/* init cpuset for per lcore config */
		CPU_ZERO(&lcore_config[lcore_id].cpuset);

		/* in 1:1 mapping, record related cpu detected state */
		lcore_config[lcore_id].detected = eal_cpu_detected(lcore_id);
		if (lcore_config[lcore_id].detected == 0) {
			config->lcore_role[lcore_id] = ROLE_OFF;
			lcore_config[lcore_id].core_index = -1;
			continue;
		}

		/* By default, lcore 1:1 map to cpu id */
		CPU_SET(lcore_id, &lcore_config[lcore_id].cpuset);

		/* By default, each detected core is enabled */
		config->lcore_role[lcore_id] = ROLE_RTE;
		lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
		lcore_config[lcore_id].socket_id = eal_cpu_socket_id(lcore_id);
		if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES)
#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
			lcore_config[lcore_id].socket_id = 0;
#else
			rte_panic("Socket ID (%u) is greater than "
				"RTE_MAX_NUMA_NODES (%d)\n",
				lcore_config[lcore_id].socket_id,
				RTE_MAX_NUMA_NODES);
#endif

		RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
				"core %u on socket %u\n",
				lcore_id, lcore_config[lcore_id].core_id,
				lcore_config[lcore_id].socket_id);
		count++;
	}
	/* Set the count of enabled logical cores of the EAL configuration */
	config->lcore_count = count;
	RTE_LOG(DEBUG, EAL,
		"Support maximum %u logical core(s) by configuration.\n",
		RTE_MAX_LCORE);
	RTE_LOG(DEBUG, EAL, "Detected %u lcore(s)\n", config->lcore_count);

	return 0;
}
Example #13
/*
 * Retrieve statistics for the heap on the given socket
 */
int
rte_malloc_get_socket_stats(int socket,
		struct rte_malloc_socket_stats *socket_stats)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	if (socket >= RTE_MAX_NUMA_NODES || socket < 0)
		return -1;

	return malloc_heap_get_stats(&mcfg->malloc_heaps[socket], socket_stats);
}
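A short caller sketch, assuming the standard DPDK rte_malloc.h API; the helper name print_heap_stats and the output format are illustrative:

#include <stdio.h>

#include <rte_malloc.h>

/* Hypothetical helper: print the largest free block on each NUMA socket,
 * using the same stats structure that Example #5 consumes. */
static void
print_heap_stats(void)
{
	struct rte_malloc_socket_stats stats;
	int socket;

	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
		if (rte_malloc_get_socket_stats(socket, &stats) < 0)
			continue;
		printf("socket %d: greatest free block %zu bytes\n",
				socket, stats.greatest_free_size);
	}
}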
Example #14
/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;
	const struct rte_memseg *memseg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* mirror the runtime memsegs from config */
	free_memseg = mcfg->free_memseg;

	/* secondary processes don't need to initialise anything */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	memseg = rte_eal_get_physmem_layout();
	if (memseg == NULL) {
		RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
		return -1;
	}

	rte_rwlock_write_lock(&mcfg->mlock);

	/* fill in uninitialized free_memsegs */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (memseg[i].addr == NULL)
			break;
		if (free_memseg[i].addr != NULL)
			continue;
		memcpy(&free_memseg[i], &memseg[i], sizeof(struct rte_memseg));
	}

	/* make all zones cache-aligned */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (free_memseg[i].addr == NULL)
			break;
		if (memseg_sanitize(&free_memseg[i]) < 0) {
			RTE_LOG(ERR, EAL, "%s(): Sanity check failed\n", __func__);
			rte_rwlock_write_unlock(&mcfg->mlock);
			return -1;
		}
	}

	/* delete all zones */
	mcfg->memzone_idx = 0;
	memset(mcfg->memzone, 0, sizeof(mcfg->memzone));

	rte_rwlock_write_unlock(&mcfg->mlock);

	return 0;
}
Example #15
static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}
Example #16
int
eal_check_common_options(struct internal_config *internal_cfg)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	if (!lcores_parsed) {
		RTE_LOG(ERR, EAL, "CPU cores must be enabled with options "
			"-c or -l\n");
		return -1;
	}
	if (cfg->lcore_role[cfg->master_lcore] != ROLE_RTE) {
		RTE_LOG(ERR, EAL, "Master lcore is not enabled for DPDK\n");
		return -1;
	}

	if (internal_cfg->process_type == RTE_PROC_INVALID) {
		RTE_LOG(ERR, EAL, "Invalid process type specified\n");
		return -1;
	}
	if (internal_cfg->process_type == RTE_PROC_PRIMARY &&
			internal_cfg->force_nchannel == 0) {
		RTE_LOG(ERR, EAL, "Number of memory channels (-n) not "
			"specified\n");
		return -1;
	}
	if (index(internal_cfg->hugefile_prefix, '%') != NULL) {
		RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" "
			"option\n");
		return -1;
	}
	if (mem_parsed && internal_cfg->force_sockets == 1) {
		RTE_LOG(ERR, EAL, "Options -m and --"OPT_SOCKET_MEM" cannot "
			"be specified at the same time\n");
		return -1;
	}
	if (internal_cfg->no_hugetlbfs &&
			(mem_parsed || internal_cfg->force_sockets == 1)) {
		RTE_LOG(ERR, EAL, "Options -m or --"OPT_SOCKET_MEM" cannot "
			"be specified together with --"OPT_NO_HUGE"\n");
		return -1;
	}

	if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) != 0 &&
		rte_eal_devargs_type_count(RTE_DEVTYPE_BLACKLISTED_PCI) != 0) {
		RTE_LOG(ERR, EAL, "Options blacklist (-b) and whitelist (-w) "
			"cannot be used at the same time\n");
		return -1;
	}

	return 0;
}
Example #17
static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_config.force_nchannel;
	config->mem_config->nrank = internal_config.force_nrank;

	return 0;
}
Example #18
/* Walk all reserved memory zones */
void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
		      void *arg)
{
	struct rte_mem_config *mcfg;
	unsigned i;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);
	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr != NULL)
			(*func)(&mcfg->memzone[i], arg);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
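The walk API above takes a callback plus an opaque argument; a minimal usage sketch, with the callback name print_zone and helper list_zones chosen here for illustration:

#include <stdio.h>

#include <rte_memzone.h>

/* Callback matching the signature expected by rte_memzone_walk(). */
static void
print_zone(const struct rte_memzone *mz, void *arg)
{
	FILE *f = arg;

	fprintf(f, "zone <%s>: len=0x%zx\n", mz->name, mz->len);
}

/* Walk every reserved zone and print it to stdout. */
static void
list_zones(void)
{
	rte_memzone_walk(print_zone, stdout);
}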
Example #19
/* Changes the lcore id of the master thread */
static int
eal_parse_master_lcore(const char *arg)
{
	char *parsing_end;
	struct rte_config *cfg = rte_eal_get_configuration();

	errno = 0;
	cfg->master_lcore = (uint32_t) strtol(arg, &parsing_end, 0);
	if (errno || parsing_end[0] != 0)
		return -1;
	if (cfg->master_lcore >= RTE_MAX_LCORE)
		return -1;
	master_lcore_parsed = 1;
	return 0;
}
Example #20
/* returns core mask used by DPDK */
static uint64_t
app_eal_core_mask(void)
{
	uint32_t i;
	uint64_t cm = 0;
	struct rte_config *cfg = rte_eal_get_configuration();

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (cfg->lcore_role[i] == ROLE_RTE)
			cm |= (1ULL << i);
	}

	cm |= (1ULL << cfg->master_lcore);

	return cm;
}
Example #21
/*
 * Look up the memzone identified by the given name
 */
const struct rte_memzone *
rte_memzone_lookup(const char *name)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *memzone = NULL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);

	memzone = memzone_lookup_thread_unsafe(name);

	rte_rwlock_read_unlock(&mcfg->mlock);

	return memzone;
}
Example #22
static inline struct rte_memzone *
get_next_free_memzone(void)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	for (i = 0; i < RTE_MAX_MEMZONE; i++) {
		if (mcfg->memzone[i].addr == NULL)
			return &mcfg->memzone[i];
	}

	return NULL;
}
Example #23
int
rte_memzone_free(const struct rte_memzone *mz)
{
	struct rte_mem_config *mcfg;
	int ret = 0;
	void *addr;
	unsigned idx;

	if (mz == NULL)
		return -EINVAL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
	idx = idx / sizeof(struct rte_memzone);

#ifdef RTE_LIBRTE_IVSHMEM
	/*
	 * If ioremap_addr is set, it's an IVSHMEM memzone and we cannot
	 * free it.
	 */
	if (mcfg->memzone[idx].ioremap_addr != 0) {
		rte_rwlock_write_unlock(&mcfg->mlock);
		return -EINVAL;
	}
#endif

	addr = mcfg->memzone[idx].addr;

	if (addr == NULL)
		ret = -EINVAL;
	else if (mcfg->memzone_cnt == 0) {
		rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
				__func__);
	} else {
		memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
		mcfg->memzone_cnt--;
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	rte_free(addr);

	return ret;
}
Example #24
/*
 * Reserve an extra memory zone and make it available for use by a particular
 * heap. This reserves the zone and sets a dummy malloc_elem header at the end
 * to prevent overflow. The rest of the zone is added to the free list as a
 * single large free block.
 */
static int
malloc_heap_add_memzone(struct malloc_heap *heap, size_t size, unsigned align)
{
	const unsigned mz_flags = 0;
	const size_t block_size = get_malloc_memzone_size();
	/* ensure the data we want to allocate will fit in the memzone */
	const size_t min_size = size + align + MALLOC_ELEM_OVERHEAD * 2;
	const struct rte_memzone *mz = NULL;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	unsigned numa_socket = heap - mcfg->malloc_heaps;

	size_t mz_size = min_size;
	if (mz_size < block_size)
		mz_size = block_size;

	char mz_name[RTE_MEMZONE_NAMESIZE];
	snprintf(mz_name, sizeof(mz_name), "MALLOC_S%u_HEAP_%u",
		     numa_socket, heap->mz_count++);

	/* try getting a block. if we fail and we don't need as big a block
	 * as given in the config, we can shrink our request and try again
	 */
	do {
		mz = rte_memzone_reserve(mz_name, mz_size, numa_socket,
					 mz_flags);
		if (mz == NULL)
			mz_size /= 2;
	} while (mz == NULL && mz_size > min_size);
	if (mz == NULL)
		return -1;

	/* allocate the memory block headers, one at end, one at start */
	struct malloc_elem *start_elem = (struct malloc_elem *)mz->addr;
	struct malloc_elem *end_elem = RTE_PTR_ADD(mz->addr,
			mz_size - MALLOC_ELEM_OVERHEAD);
	end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, RTE_CACHE_LINE_SIZE);

	const unsigned elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
	malloc_elem_init(start_elem, heap, mz, elem_size);
	malloc_elem_mkend(end_elem, start_elem);
	malloc_elem_free_list_insert(start_elem);

	/* increase heap total size by size of new memzone */
	heap->total_size += mz_size - MALLOC_ELEM_OVERHEAD;
	return 0;
}
Example #25
int
rte_assign_lcore_id(void)
{
	int ret = -1;
	unsigned lcore_id;
	struct rte_config *config = rte_eal_get_configuration();

	rte_spinlock_lock(&lcore_sl);

	/* See whether this thread already has an lcore ID */
	lcore_id = rte_lcore_id();
	if (lcore_id == (unsigned)-1) {
		/* Find the first available lcore with a CPU detection state
		 * that indicates OFF
		 */
		for (lcore_id = 0;
				(lcore_id < RTE_MAX_LCORE) &&
				(config->lcore_role[lcore_id] == ROLE_OFF);
				++lcore_id)
			;

		/* if we found one, assign it */
		if (lcore_id < RTE_MAX_LCORE) {
			config->lcore_role[lcore_id] = ROLE_RTE;

			/* These are floating lcores - no core id or socket id */
			lcore_config[lcore_id].core_id = LCORE_ID_ANY;
			lcore_config[lcore_id].socket_id = SOCKET_ID_ANY;

			lcore_config[lcore_id].f = NULL;

			lcore_config[lcore_id].thread_id = pthread_self();
			lcore_config[lcore_id].detected = 0; /* core was not detected */
			lcore_config[lcore_id].state = RUNNING;
			config->lcore_count++;

			ret = lcore_id;

			RTE_PER_LCORE(_lcore_id) = lcore_id;
		}
	}

	rte_spinlock_unlock(&lcore_sl);
	return ret;
}
Example #26
/**
 * Calculate the MFN (machine frame number) from a physical address in Xen Dom0.
 */
phys_addr_t
rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
{
	int mfn_id;
	uint64_t mfn, mfn_offset;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg *memseg = mcfg->memseg;

	mfn_id = (phy_addr - memseg[memseg_id].phys_addr) / RTE_PGSIZE_2M;

	/* the MFN is contiguous within a 2M page */
	mfn_offset = (phy_addr - memseg[memseg_id].phys_addr) %
					RTE_PGSIZE_2M / PAGE_SIZE;
	mfn = mfn_offset + memseg[memseg_id].mfn[mfn_id];

	/* return machine address */
	return (mfn * PAGE_SIZE + phy_addr % PAGE_SIZE);
}
Example #27
/* get the total size of memory */
uint64_t
rte_eal_get_physmem_size(void)
{
	const struct rte_mem_config *mcfg;
	unsigned i = 0;
	uint64_t total_len = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (mcfg->memseg[i].addr == NULL)
			break;

		total_len += mcfg->memseg[i].len;
	}

	return total_len;
}
Example #28
/*
 * Allocate memory on specified heap.
 */
void *
rte_malloc_socket(const char *type, size_t size, unsigned align, int socket)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	/* return NULL if size is 0 or alignment is not power-of-2 */
	if (size == 0 || !rte_is_power_of_2(align))
		return NULL;

	if (socket == SOCKET_ID_ANY)
		socket = malloc_get_numa_socket();

	/* Check socket parameter */
	if (socket >= RTE_MAX_NUMA_NODES)
		return NULL;

	return malloc_heap_alloc(&mcfg->malloc_heaps[socket], type,
			size, align == 0 ? 1 : align);
}
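A brief allocation sketch built on the call above, assuming the usual rte_malloc.h and rte_lcore.h helpers; the buffer tag and the helper name alloc_on_local_socket are illustrative:

#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memory.h>

/* Hypothetical helper: allocate a cache-line aligned buffer on the NUMA
 * socket of the calling lcore (assumes an EAL thread), then release it. */
static int
alloc_on_local_socket(size_t size)
{
	int socket = rte_lcore_to_socket_id(rte_lcore_id());
	void *buf = rte_malloc_socket("example_buf", size,
			RTE_CACHE_LINE_SIZE, socket);

	if (buf == NULL)
		return -1;
	/* ... use the buffer ... */
	rte_free(buf);
	return 0;
}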
Example #29
static const struct rte_memzone *
rte_memzone_reserve_thread_safe(const char *name, size_t len,
				int socket_id, unsigned flags, unsigned align,
				unsigned bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, bound);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}
Example #30
static inline const struct rte_memzone *
memzone_lookup_thread_unsafe(const char *name)
{
	const struct rte_mem_config *mcfg;
	unsigned i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/*
	 * the algorithm is not optimal (linear), but there are few
	 * zones and this function should be called at init only
	 */
	for (i = 0; i < RTE_MAX_MEMZONE && mcfg->memzone[i].addr != NULL; i++) {
		if (!strncmp(name, mcfg->memzone[i].name, RTE_MEMZONE_NAMESIZE))
			return &mcfg->memzone[i];
	}

	return NULL;
}