Example No. 1
void
rte_ivshmem_metadata_dump(FILE *f, const char *name)
{
	unsigned i = 0;
	struct ivshmem_config * config;
	struct rte_ivshmem_metadata_entry *entry;
#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
	uint64_t addr;
	uint64_t end, hugepage_sz;
	struct memseg_cache_entry e;
#endif

	if (name == NULL)
		return;

	/* bail out if we try to use an unknown config */
	config = get_config_by_name(name);
	if (config == NULL) {
		RTE_LOG(ERR, EAL, "Cannot find IVSHMEM config %s!\n", name);
		return;
	}

	rte_spinlock_lock(&config->sl);

	entry = &config->metadata->entry[0];

	while (i < RTE_DIM(config->metadata->entry) && entry->mz.addr != NULL) {

		fprintf(f, "Entry %u: name:<%-20s>, phys:0x%-15lx, len:0x%-15lx, "
			"virt:%-15p, off:0x%-15lx\n",
			i,
			entry->mz.name,
			entry->mz.phys_addr,
			entry->mz.len,
			entry->mz.addr,
			entry->offset);
		i++;

#ifdef RTE_LIBRTE_IVSHMEM_DEBUG
		fprintf(f, "\tHugepage files:\n");

		hugepage_sz = entry->mz.hugepage_sz;
		addr = RTE_ALIGN_FLOOR(entry->mz.addr_64, hugepage_sz);
		end = addr + RTE_ALIGN_CEIL(entry->mz.len + (entry->mz.addr_64 - addr),
				hugepage_sz);

		for (; addr < end; addr += hugepage_sz) {
			memset(&e, 0, sizeof(e));

			get_hugefile_by_virt_addr(addr, &e);

			fprintf(f, "\t0x%"PRIx64 "-0x%" PRIx64 " offset: 0x%" PRIx64 " %s\n",
					addr, addr + hugepage_sz, e.offset, e.filepath);
		}
#endif
		entry++;
	}

	rte_spinlock_unlock(&config->sl);
}
Example No. 2
/*
 * free a malloc_elem block by adding it to the free list. If the
 * blocks either immediately before or immediately after the newly freed block
 * are also free, the blocks are merged together.
 */
int
malloc_elem_free(struct malloc_elem *elem)
{
	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
		return -1;

	rte_spinlock_lock(&(elem->heap->lock));
	struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
	if (next->state == ELEM_FREE){
		/* remove from free list, join to this one */
		elem_free_list_remove(next);
		join_elem(elem, next);
	}

	/* check if the previous element is free; if so, join with it and
	 * re-insert the merged element in the free list, since its size
	 * has changed
	 */
	if (elem->prev != NULL && elem->prev->state == ELEM_FREE) {
		elem_free_list_remove(elem->prev);
		join_elem(elem->prev, elem);
		malloc_elem_free_list_insert(elem->prev);
	}
	/* otherwise add ourselves to the free list */
	else {
		malloc_elem_free_list_insert(elem);
		elem->pad = 0;
	}
	/* decrease heap's count of allocated elements */
	elem->heap->alloc_count--;
	rte_spinlock_unlock(&(elem->heap->lock));

	return 0;
}
Example No. 3
static int
stack_dequeue(struct rte_mempool *mp, void **obj_table,
		unsigned n)
{
	struct rte_mempool_stack *s = mp->pool_data;
	void **cache_objs;
	unsigned index, len;

	rte_spinlock_lock(&s->sl);

	if (unlikely(n > s->len)) {
		rte_spinlock_unlock(&s->sl);
		return -ENOENT;
	}

	cache_objs = s->objs;

	for (index = 0, len = s->len - 1; index < n;
			++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	s->len -= n;
	rte_spinlock_unlock(&s->sl);
	return 0;
}
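
Both this dequeue handler and the enqueue handler in Example No. 5 operate on mp->pool_data through the same spinlock-protected LIFO. Below is a minimal sketch of the layout they assume; field names are taken from the accesses in the handlers, and the real definition may differ.

/* Sketch of the per-pool state assumed by stack_enqueue()/stack_dequeue():
 * a spinlock-protected LIFO of object pointers. */
struct rte_mempool_stack {
	rte_spinlock_t sl;	/* protects len and objs[] */
	unsigned int size;	/* capacity of objs[] */
	unsigned int len;	/* number of objects currently stored */
	void *objs[];		/* object pointers; objs[len - 1] is the top */
};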
Example No. 4
int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
{
	enum mc_cmd_status status;
	uint64_t response;

	if (!mc_io || !mc_io->regs)
		return -EACCES;

	/* --- Call lock function here in case portal is shared --- */
	rte_spinlock_lock(&mc_portal_lock);

	mc_write_command(mc_io->regs, cmd);

	/* Spin until status changes */
	do {
		response = ioread64(mc_io->regs);
		status = mc_cmd_read_status((struct mc_command *)&response);

		/* --- Call wait function here to prevent blocking ---
		 * Change the loop condition accordingly to exit on timeout.
		 */
	} while (status == MC_CMD_STATUS_READY);

	/* Read the response back into the command buffer */
	mc_read_response(mc_io->regs, cmd);

	/* --- Call unlock function here in case portal is shared --- */
	rte_spinlock_unlock(&mc_portal_lock);

	return mc_status_to_error(status);
}
Example No. 5
static int
stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_stack *s = mp->pool_data;
	void **cache_objs;
	unsigned index;

	rte_spinlock_lock(&s->sl);
	cache_objs = &s->objs[s->len];

	/* Is there sufficient space in the stack? */
	if ((s->len + n) > s->size) {
		rte_spinlock_unlock(&s->sl);
		return -ENOBUFS;
	}

	/* Add elements back into the cache */
	for (index = 0; index < n; ++index, obj_table++)
		cache_objs[index] = *obj_table;

	s->len += n;

	rte_spinlock_unlock(&s->sl);
	return 0;
}
Example No. 6
int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */

	/*
	 * Starting the management event queue polls it, but this cannot
	 * interfere with other polling contexts since mgmt_evq_running
	 * is still false.
	 */
	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = true;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->mgmt_evq);
		if (rc != 0)
			goto fail_mgmt_evq_prime;
	}

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), this is required to process link status changes
	 * and other device-level events and so avoid an unrecoverable
	 * error due to event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_prime:
	sfc_ev_qstop(sa->mgmt_evq);

fail_mgmt_evq_start:
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
Example No. 7
int rte_ivshmem_metadata_create(const char *name)
{
	struct ivshmem_config * ivshmem_config;
	unsigned index;

	if (pagesz == 0)
		pagesz = getpagesize();

	if (name == NULL)
		return -1;

	rte_spinlock_lock(&global_cfg_sl);

	for (index = 0; index < RTE_DIM(ivshmem_global_config); index++) {
		if (ivshmem_global_config[index].metadata == NULL) {
			ivshmem_config = &ivshmem_global_config[index];
			break;
		}
	}

	if (index == RTE_DIM(ivshmem_global_config)) {
		RTE_LOG(ERR, EAL, "Cannot create more ivshmem config files. "
		"Maximum has been reached\n");
		rte_spinlock_unlock(&global_cfg_sl);
		return -1;
	}

	ivshmem_config->lock.l_type = F_WRLCK;
	ivshmem_config->lock.l_whence = SEEK_SET;

	ivshmem_config->lock.l_start = 0;
	ivshmem_config->lock.l_len = METADATA_SIZE_ALIGNED;

	ivshmem_global_config[index].metadata = ((struct rte_ivshmem_metadata *)
			ivshmem_metadata_create(
					name,
					sizeof(struct rte_ivshmem_metadata),
					&ivshmem_config->lock));

	if (ivshmem_global_config[index].metadata == NULL) {
		rte_spinlock_unlock(&global_cfg_sl);
		return -1;
	}

	/* Metadata setup */
	memset(ivshmem_config->metadata, 0, sizeof(struct rte_ivshmem_metadata));
	ivshmem_config->metadata->magic_number = IVSHMEM_MAGIC;
	snprintf(ivshmem_config->metadata->name,
			sizeof(ivshmem_config->metadata->name), "%s", name);

	rte_spinlock_unlock(&global_cfg_sl);

	return 0;
}
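
A minimal usage sketch combining this with the dump function from Example No. 1: create a metadata file, add a memzone to it, and dump the result. rte_ivshmem_metadata_add_memzone() is assumed to come from the same (since removed) IVSHMEM API; the names and sizes are illustrative and error handling is trimmed.

/* Sketch only: illustrative names/sizes, error paths trimmed. */
static int
setup_ivshmem_metadata(void)
{
	const struct rte_memzone *mz;

	if (rte_ivshmem_metadata_create("guest_cfg") < 0)
		return -1;

	/* reserve a memzone to be shared with the guest */
	mz = rte_memzone_reserve("shared_state", 2 * 1024 * 1024,
			rte_socket_id(), 0);
	if (mz == NULL)
		return -1;

	if (rte_ivshmem_metadata_add_memzone(mz, "guest_cfg") < 0)
		return -1;

	rte_ivshmem_metadata_dump(stdout, "guest_cfg");
	return 0;
}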
Example No. 8
/* Dump log history to file */
void
rte_log_dump_history(FILE *out)
{
	struct log_history_list tmp_log_history;
	struct log_history *hist_buf;
	unsigned i;

	/* only one dump at a time */
	rte_spinlock_lock(&log_dump_lock);

	/* save list, and re-init to allow logging during dump */
	rte_spinlock_lock(&log_list_lock);
	tmp_log_history = log_history;
	STAILQ_INIT(&log_history);
	rte_spinlock_unlock(&log_list_lock);

	for (i = 0; i < RTE_LOG_HISTORY; i++) {

		/* remove one message from history list */
		hist_buf = STAILQ_FIRST(&tmp_log_history);

		if (hist_buf == NULL)
			break;

		STAILQ_REMOVE_HEAD(&tmp_log_history, next);

		/* write message to the output stream */
		if (fwrite(hist_buf->buf, hist_buf->size, 1, out) == 0) {
			rte_mempool_mp_put(log_history_mp, hist_buf);
			break;
		}

		/* put back message structure in pool */
		rte_mempool_mp_put(log_history_mp, hist_buf);
	}
	fflush(out);

	rte_spinlock_unlock(&log_dump_lock);
}
Example No. 9
int
rte_netmap_close(__rte_unused int fd)
{
	int32_t rc;

	rte_spinlock_lock(&netmap_lock);
	rc = fd_release(fd);
	rte_spinlock_unlock(&netmap_lock);

	if (rc < 0) {
		errno = -rc;
		rc = -1;
	}
	return (rc);
}
Example No. 10
void
sfc_ev_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = false;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	sfc_ev_qstop(sa->mgmt_evq);

	efx_ev_fini(sa->nic);
}
Example No. 11
static void
sfc_mcdi_execute(void *arg, efx_mcdi_req_t *emrp)
{
	struct sfc_adapter *sa = (struct sfc_adapter *)arg;
	struct sfc_mcdi *mcdi = &sa->mcdi;

	rte_spinlock_lock(&mcdi->lock);

	SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);

	efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
	sfc_mcdi_poll(sa);

	rte_spinlock_unlock(&mcdi->lock);
}
Example No. 12
/**
 * Return a "fake" file descriptor with a value above RLIMIT_NOFILE so that
 * any attempt to use that file descriptor with the usual API will fail.
 */
int
rte_netmap_open(__rte_unused const char *pathname, __rte_unused int flags)
{
	int fd;

	rte_spinlock_lock(&netmap_lock);
	fd = fd_reserve();
	rte_spinlock_unlock(&netmap_lock);

	if (fd < 0) {
		errno = -fd;
		fd = -1;
	}
	return (fd);
}
Example No. 13
static void
kni_memzone_pool_release(struct rte_kni_memzone_slot *slot)
{
	rte_spinlock_lock(&kni_memzone_pool.mutex);

	if (kni_memzone_pool.free)
		kni_memzone_pool.free_tail->next = slot;
	else
		kni_memzone_pool.free = slot;

	kni_memzone_pool.free_tail = slot;
	slot->next = NULL;
	slot->in_use = 0;

	rte_spinlock_unlock(&kni_memzone_pool.mutex);
}
Example No. 14
int
rte_log_add_in_history(const char *buf, size_t size)
{
	struct log_history *hist_buf = NULL;
	static const unsigned hist_buf_size = LOG_ELT_SIZE - sizeof(*hist_buf);
	void *obj;

	if (history_enabled == 0)
		return 0;

	rte_spinlock_lock(&log_list_lock);

	/* get a buffer for adding in history */
	if (log_history_size > RTE_LOG_HISTORY) {
		hist_buf = STAILQ_FIRST(&log_history);
		STAILQ_REMOVE_HEAD(&log_history, next);
	}
	else {
		if (rte_mempool_mc_get(log_history_mp, &obj) < 0)
			obj = NULL;
		hist_buf = obj;
	}

	/* no buffer */
	if (hist_buf == NULL) {
		rte_spinlock_unlock(&log_list_lock);
		return -ENOBUFS;
	}

	/* not enough room for the msg, return the buffer to the mempool */
	if (size >= hist_buf_size) {
		rte_mempool_mp_put(log_history_mp, hist_buf);
		rte_spinlock_unlock(&log_list_lock);
		return -ENOBUFS;
	}

	/* add in history */
	memcpy(hist_buf->buf, buf, size);
	hist_buf->buf[size] = hist_buf->buf[hist_buf_size-1] = '\0';
	hist_buf->size = size;
	STAILQ_INSERT_TAIL(&log_history, hist_buf, next);
	log_history_size++;
	rte_spinlock_unlock(&log_list_lock);

	return 0;
}
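
The bookkeeping above (hist_buf_size, the terminating '\0', the STAILQ links) assumes a history element laid out roughly as below, where LOG_ELT_SIZE is the size of one mempool element. This is a sketch of the assumed layout, not the authoritative definition.

/* Assumed layout of one history element: a list link, the stored message
 * length, and a character buffer filling the rest of the LOG_ELT_SIZE
 * mempool element, hence hist_buf_size = LOG_ELT_SIZE - sizeof(*hist_buf). */
struct log_history {
	STAILQ_ENTRY(log_history) next;	/* link in the log_history list */
	unsigned size;			/* number of bytes stored in buf */
	char buf[];			/* message text, '\0'-terminated */
};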
Example No. 15
/* add a new ops struct in rte_mempool_ops_table, return its index. */
int
rte_mempool_register_ops(const struct rte_mempool_ops *h)
{
	struct rte_mempool_ops *ops;
	int16_t ops_index;

	rte_spinlock_lock(&rte_mempool_ops_table.sl);

	if (rte_mempool_ops_table.num_ops >=
			RTE_MEMPOOL_MAX_OPS_IDX) {
		rte_spinlock_unlock(&rte_mempool_ops_table.sl);
		RTE_LOG(ERR, MEMPOOL,
			"Maximum number of mempool ops structs exceeded\n");
		return -ENOSPC;
	}

	if (h->alloc == NULL || h->enqueue == NULL ||
			h->dequeue == NULL || h->get_count == NULL) {
		rte_spinlock_unlock(&rte_mempool_ops_table.sl);
		RTE_LOG(ERR, MEMPOOL,
			"Missing callback while registering mempool ops\n");
		return -EINVAL;
	}

	if (strlen(h->name) >= sizeof(ops->name) - 1) {
		rte_spinlock_unlock(&rte_mempool_ops_table.sl);
		RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n",
				__func__, h->name);
		rte_errno = EEXIST;
		return -EEXIST;
	}

	ops_index = rte_mempool_ops_table.num_ops++;
	ops = &rte_mempool_ops_table.ops[ops_index];
	snprintf(ops->name, sizeof(ops->name), "%s", h->name);
	ops->alloc = h->alloc;
	ops->free = h->free;
	ops->enqueue = h->enqueue;
	ops->dequeue = h->dequeue;
	ops->get_count = h->get_count;

	rte_spinlock_unlock(&rte_mempool_ops_table.sl);

	return ops_index;
}
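
A registration sketch tying this back to the stack handlers in Examples No. 3 and No. 5. stack_alloc(), stack_free() and stack_get_count() are assumed to be defined alongside them, and MEMPOOL_REGISTER_OPS() is the constructor macro that ends up calling rte_mempool_register_ops() at startup.

/* Sketch: register the spinlock-protected stack handler as a mempool ops
 * struct. The alloc/free/get_count callbacks are assumed to exist in the
 * same file as stack_enqueue()/stack_dequeue(). */
static const struct rte_mempool_ops ops_stack = {
	.name = "stack",
	.alloc = stack_alloc,
	.free = stack_free,
	.enqueue = stack_enqueue,
	.dequeue = stack_dequeue,
	.get_count = stack_get_count,
};

MEMPOOL_REGISTER_OPS(ops_stack);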
Example No. 16
int
rte_assign_lcore_id (void)
{
       int ret = -1;
       unsigned lcore_id;
       struct rte_config *config = rte_eal_get_configuration();

       rte_spinlock_lock(&lcore_sl);

       /* See whether this thread already has an lcore ID */
       lcore_id = rte_lcore_id();
       if (lcore_id == (unsigned)-1)
       {
               /* Find the first available lcore, i.e. the first one whose
                * role is still OFF
                */
               for (lcore_id = 0;
                       (lcore_id < RTE_MAX_LCORE) && (config->lcore_role[lcore_id] != ROLE_OFF);
                       ++lcore_id);

               /* if we found one, assign it */
               if (lcore_id < RTE_MAX_LCORE)
               {
                       config->lcore_role[lcore_id] = ROLE_RTE;

                       /* These are floating lcores - no core id or socket id */
                       lcore_config[lcore_id].core_id = LCORE_ID_ANY;
                       lcore_config[lcore_id].socket_id = SOCKET_ID_ANY;

                       lcore_config[lcore_id].f = NULL;

                       lcore_config[lcore_id].thread_id = pthread_self();
                       lcore_config[lcore_id].detected = 0;                            /* Core was not detected */
                       lcore_config[lcore_id].state = RUNNING;
                       config->lcore_count++;

                       ret = lcore_id;

                       RTE_PER_LCORE(_lcore_id) = lcore_id;
               }
       }

       rte_spinlock_unlock(&lcore_sl);
       return ret;
}
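
A caller sketch for the function above: a user-created pthread claims a floating lcore id before touching per-lcore resources. It assumes EAL is already initialised and that thread affinity is handled elsewhere.

/* Sketch: entry point for a pthread created outside of EAL that wants a
 * floating lcore id via rte_assign_lcore_id() shown above. */
static void *
worker_thread(void *arg __rte_unused)
{
	int lcore = rte_assign_lcore_id();

	if (lcore < 0) {
		printf("no free lcore slot for this thread\n");
		return NULL;
	}

	printf("thread registered as lcore %d\n", lcore);
	/* ... use per-lcore resources here ... */
	return NULL;
}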
Example No. 17
static int
add_memzone_to_metadata(const struct rte_memzone * mz,
		struct ivshmem_config * config)
{
	struct rte_ivshmem_metadata_entry * entry;
	unsigned i;

	rte_spinlock_lock(&config->sl);

	/* find free slot in this config */
	for (i = 0; i < RTE_DIM(config->metadata->entry); i++) {
		entry = &config->metadata->entry[i];

		if (entry->mz.addr_64 != 0 && overlap(mz, &entry->mz)) {
			RTE_LOG(ERR, EAL, "Overlapping memzones!\n");
			goto fail;
		}

		/* if addr is zero, the memzone is probably free */
		if (entry->mz.addr_64 == 0) {
			RTE_LOG(DEBUG, EAL, "Adding memzone '%s' at %p to metadata %s\n",
					mz->name, mz->addr, config->metadata->name);
			memcpy(&entry->mz, mz, sizeof(struct rte_memzone));

			/* run config file parser */
			if (build_config(config->metadata) < 0)
				goto fail;

			break;
		}
	}

	/* if we reached the maximum, that means we have no place in config */
	if (i == RTE_DIM(config->metadata->entry)) {
		RTE_LOG(ERR, EAL, "No space left in IVSHMEM metadata %s!\n",
				config->metadata->name);
		goto fail;
	}

	rte_spinlock_unlock(&config->sl);
	return 0;
fail:
	rte_spinlock_unlock(&config->sl);
	return -1;
}
Example No. 18
struct rte_mempool* init_mem(uint32_t nb_mbuf, uint32_t socket, uint32_t mbuf_size) {
	static volatile uint32_t mbuf_cnt = 0;
	char pool_name[32];
	sprintf(pool_name, "mbuf_pool%d", __sync_fetch_and_add(&mbuf_cnt, 1));
	// rte_mempool_create is apparently not thread-safe :(
	static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;
	rte_spinlock_lock(&lock);
	struct rte_mempool* pool = rte_pktmbuf_pool_create(pool_name, nb_mbuf, MEMPOOL_CACHE_SIZE,
		0, mbuf_size + RTE_PKTMBUF_HEADROOM,
		socket
	);
	rte_spinlock_unlock(&lock);
	if (!pool) {
		printf("Memory allocation failed: %s (%d)\n", rte_strerror(-rte_errno), rte_errno); 
		return 0;
	}
	return pool;
}
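
A caller sketch for the helper above; the mbuf count, data-room size and the rx_pool name are illustrative values.

// Sketch: create one mbuf pool on the local socket with the helper above.
// 8192 mbufs and a 2048 byte data room are illustrative values.
static struct rte_mempool* rx_pool;

static int setup_pools(void) {
	rx_pool = init_mem(8192, rte_socket_id(), 2048);
	return rx_pool ? 0 : -1;
}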
Example No. 19
/**
 * Simulate a Netmap NIOCREGIF ioctl:
 */
static int
ioctl_niocregif(int32_t fd, void * param)
{
	uint8_t portid;
	int32_t rc;
	uint32_t idx;
	struct nmreq *req;

	req = (struct nmreq *)param;
	if ((rc = check_nmreq(req, &portid)) != 0)
		return (rc);

	idx = FD_TO_IDX(fd);

	rte_spinlock_lock(&netmap_lock);
	rc = netmap_regif(req, idx, portid);
	rte_spinlock_unlock(&netmap_lock);

	return (rc);
}
Example No. 20
struct rte_mempool* init_mem(uint32_t nb_mbuf, int32_t socket) {
	static volatile uint32_t mbuf_cnt = 0;
	char pool_name[32];
	sprintf(pool_name, "mbuf_pool%d", __sync_fetch_and_add(&mbuf_cnt, 1));
	// rte_mempool_create is apparently not thread-safe :(
	static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;
	rte_spinlock_lock(&lock);
	struct rte_mempool* pool = rte_mempool_create(pool_name, nb_mbuf, MBUF_SIZE, MEMPOOL_CACHE_SIZE,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket < 0 ? rte_socket_id() : (uint32_t) socket, 0
	);
	rte_spinlock_unlock(&lock);
	if (!pool) {
		printf("Memory allocation failed: %s (%d)\n", rte_strerror(rte_errno), rte_errno); 
		return 0;
	}
	return pool;
}
Example No. 21
int
rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;
	int retval;

	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	rte_spinlock_lock(&internals->lock);

	retval = __eth_bond_slave_remove_lock_free(bonded_port_id, slave_port_id);

	rte_spinlock_unlock(&internals->lock);

	return retval;
}
Example No. 22
static inline int
mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
		uint16_t txsize, void *rxmsg, uint16_t rxsize)
{
	int res = -EINVAL;

	if (m->init_once == 0 || hdr == NULL ||
		txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
		mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
				m->init_once, hdr, txsize, rxsize);
		return res;
	}

	rte_spinlock_lock(&m->lock);

	mbox_send_request(m, hdr, txmsg, txsize);
	res = mbox_wait_response(m, hdr, rxmsg, rxsize);

	rte_spinlock_unlock(&m->lock);
	return res;
}
Example No. 23
int
rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev;
	struct bond_dev_private *internals;

	int retval;

	/* Verify that port id's are valid bonded and slave ports */
	if (valid_bonded_port_id(bonded_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	rte_spinlock_lock(&internals->lock);

	retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);

	rte_spinlock_unlock(&internals->lock);

	return retval;
}
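
A usage sketch for the locked add/remove pair above (Examples No. 21 and No. 23). rte_eth_bond_create() and BONDING_MODE_ROUND_ROBIN come from the same bonding API; the device name and mode are illustrative.

/* Sketch: create a bonded device and attach two slave ports under its
 * private lock via rte_eth_bond_slave_add(). */
static int
setup_bonding(uint8_t slave0, uint8_t slave1)
{
	int bonded;

	bonded = rte_eth_bond_create("bond0", BONDING_MODE_ROUND_ROBIN,
			rte_socket_id());
	if (bonded < 0)
		return -1;

	if (rte_eth_bond_slave_add((uint8_t)bonded, slave0) != 0 ||
			rte_eth_bond_slave_add((uint8_t)bonded, slave1) != 0)
		return -1;

	return bonded;
}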
Example No. 24
/* Pool mgmt */
static struct rte_kni_memzone_slot*
kni_memzone_pool_alloc(void)
{
	struct rte_kni_memzone_slot *slot;

	rte_spinlock_lock(&kni_memzone_pool.mutex);

	if (!kni_memzone_pool.free) {
		rte_spinlock_unlock(&kni_memzone_pool.mutex);
		return NULL;
	}

	slot = kni_memzone_pool.free;
	kni_memzone_pool.free = slot->next;
	slot->in_use = 1;

	if (!kni_memzone_pool.free)
		kni_memzone_pool.free_tail = NULL;

	rte_spinlock_unlock(&kni_memzone_pool.mutex);

	return slot;
}
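
A round-trip sketch for the pool helpers above (this allocation routine and the release routine in Example No. 13); both are static, so such a caller would live in the same file.

/* Sketch: check a slot out of the pool, use it, and hand it back. */
static void
kni_memzone_pool_roundtrip(void)
{
	struct rte_kni_memzone_slot *slot;

	slot = kni_memzone_pool_alloc();
	if (slot == NULL)
		return;		/* pool exhausted */

	/* ... use the memzones attached to the slot ... */

	kni_memzone_pool_release(slot);
}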
Example No. 25
/*
 * attempt to resize a malloc_elem by expanding into any free space
 * immediately after it in memory.
 */
int
malloc_elem_resize(struct malloc_elem *elem, size_t size)
{
	const size_t new_size = size + MALLOC_ELEM_OVERHEAD;
	/* if we request a smaller size, then always return ok */
	const size_t current_size = elem->size - elem->pad;
	if (current_size >= new_size)
		return 0;

	struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
	rte_spinlock_lock(&elem->heap->lock);
	if (next->state != ELEM_FREE)
		goto err_return;
	if (current_size + next->size < new_size)
		goto err_return;

	/* we now know the element fits, so remove from free list,
	 * join the two
	 */
	elem_free_list_remove(next);
	join_elem(elem, next);

	if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD){
		/* now we have a big block together. Lets cut it down a bit, by splitting */
		struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
		split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
		split_elem(elem, split_pt);
		malloc_elem_free_list_insert(split_pt);
	}
	rte_spinlock_unlock(&elem->heap->lock);
	return 0;

err_return:
	rte_spinlock_unlock(&elem->heap->lock);
	return -1;
}
Example No. 26
void
sfc_mcdi_fini(struct sfc_adapter *sa)
{
	struct sfc_mcdi *mcdi;
	efx_mcdi_transport_t *emtp;

	sfc_log_init(sa, "entry");

	mcdi = &sa->mcdi;
	emtp = &mcdi->transport;

	rte_spinlock_lock(&mcdi->lock);

	SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
	mcdi->state = SFC_MCDI_UNINITIALIZED;

	sfc_log_init(sa, "fini MCDI");
	efx_mcdi_fini(sa->nic);
	memset(emtp, 0, sizeof(*emtp));

	rte_spinlock_unlock(&mcdi->lock);

	sfc_dma_free(sa, &mcdi->mem);
}
Example No. 27
/**
 * Simulate a Netmap NIOCUNREGIF ioctl: put an interface running in Netmap
 * mode back in "normal" mode. In our case, we just stop the port associated
 * with this file descriptor.
 */
static int
ioctl_niocunregif(int fd)
{
	uint32_t idx, port;
	int32_t rc;

	idx = FD_TO_IDX(fd);

	rte_spinlock_lock(&netmap_lock);

	port = fd_port[idx].port;
	if (port < RTE_DIM(ports) && ports[port].fd == idx) {
		netmap_unregif(idx, port);
		rc = 0;
	} else {
		RTE_LOG(ERR, USER1,
			"%s: %d is not associated with valid port\n",
			__func__, fd);
		rc = -EINVAL;
	}

	rte_spinlock_unlock(&netmap_lock);
	return (rc);
}
Example No. 28
int
rte_ivshmem_metadata_cmdline_generate(char *buffer, unsigned size, const char *name)
{
	const struct memseg_cache_entry * ms_cache, *entry;
	struct ivshmem_config * config;
	char cmdline[IVSHMEM_QEMU_CMDLINE_BUFSIZE], *cmdline_ptr;
	char cfg_file_path[PATH_MAX];
	unsigned remaining_len, tmplen, iter;
	uint64_t shared_mem_size, zero_size, total_size;

	if (buffer == NULL || name == NULL)
		return -1;

	config = get_config_by_name(name);

	if (config == NULL) {
		RTE_LOG(ERR, EAL, "Config %s not found!\n", name);
		return -1;
	}

	rte_spinlock_lock(&config->sl);

	/* prepare metadata file path */
	snprintf(cfg_file_path, sizeof(cfg_file_path), IVSHMEM_CONFIG_FILE_FMT,
			config->metadata->name);

	ms_cache = config->memseg_cache;

	cmdline_ptr = cmdline;
	remaining_len = sizeof(cmdline);

	shared_mem_size = 0;
	iter = 0;

	while ((iter < RTE_DIM(config->metadata->entry)) && (ms_cache[iter].len != 0)) {

		entry = &ms_cache[iter];

		/* Offset and sizes within the current pathname */
		tmplen = snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
				entry->filepath, entry->offset, entry->len);

		shared_mem_size += entry->len;

		cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
		remaining_len -= tmplen;

		if (remaining_len == 0) {
			RTE_LOG(ERR, EAL, "Command line too long!\n");
			rte_spinlock_unlock(&config->sl);
			return -1;
		}

		iter++;
	}

	total_size = rte_align64pow2(shared_mem_size + METADATA_SIZE_ALIGNED);
	zero_size = total_size - shared_mem_size - METADATA_SIZE_ALIGNED;

	/* add /dev/zero to command-line to fill the space */
	tmplen = snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
			"/dev/zero",
			(uint64_t)0x0,
			zero_size);

	cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
	remaining_len -= tmplen;

	if (remaining_len == 0) {
		RTE_LOG(ERR, EAL, "Command line too long!\n");
		rte_spinlock_unlock(&config->sl);
		return -1;
	}

	/* add metadata file to the end of command-line */
	tmplen = snprintf(cmdline_ptr, remaining_len, IVSHMEM_QEMU_CMD_FD_FMT,
			cfg_file_path,
			(uint64_t)0x0,
			METADATA_SIZE_ALIGNED);

	cmdline_ptr = RTE_PTR_ADD(cmdline_ptr, tmplen);
	remaining_len -= tmplen;

	if (remaining_len == 0) {
		RTE_LOG(ERR, EAL, "Command line too long!\n");
		rte_spinlock_unlock(&config->sl);
		return -1;
	}

	/* if current length of the command line is bigger than the buffer supplied
	 * by the user, or if command-line is bigger than what IVSHMEM accepts */
	if ((sizeof(cmdline) - remaining_len) > size) {
		RTE_LOG(ERR, EAL, "Buffer is too short!\n");
		rte_spinlock_unlock(&config->sl);
		return -1;
	}
	/* complete the command-line */
	snprintf(buffer, size,
			IVSHMEM_QEMU_CMD_LINE_HEADER_FMT,
			total_size >> 20,
			cmdline);

	rte_spinlock_unlock(&config->sl);

	return 0;
}
int
rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
				int32_t rx_queue_id)
{
	int ret = 0;
	struct rte_eventdev *dev;
	struct rte_event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	uint32_t cap;
	uint16_t i;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = id_to_rx_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id,
						&cap);
	if (ret)
		return ret;

	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
		rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
			 (uint16_t)rx_queue_id);
		return -EINVAL;
	}

	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
				 -ENOTSUP);
		ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
						&rte_eth_devices[eth_dev_id],
						rx_queue_id);
		if (ret == 0) {
			update_queue_info(rx_adapter,
					&rx_adapter->eth_devices[eth_dev_id],
					rx_queue_id,
					0);
			if (dev_info->nb_dev_queues == 0) {
				rte_free(dev_info->rx_queue);
				dev_info->rx_queue = NULL;
			}
		}
	} else {
		int rc;
		rte_spinlock_lock(&rx_adapter->rx_lock);
		if (rx_queue_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
				event_eth_rx_adapter_queue_del(rx_adapter,
							dev_info,
							i);
		} else {
			event_eth_rx_adapter_queue_del(rx_adapter,
						dev_info,
						(uint16_t)rx_queue_id);
		}

		rc = eth_poll_wrr_calc(rx_adapter);
		if (rc)
			RTE_EDEV_LOG_ERR("WRR recalculation failed %" PRId32,
					rc);

		if (dev_info->nb_dev_queues == 0) {
			rte_free(dev_info->rx_queue);
			dev_info->rx_queue = NULL;
		}

		rte_spinlock_unlock(&rx_adapter->rx_lock);
		rte_service_component_runstate_set(rx_adapter->service_id,
				sw_rx_adapter_queue_count(rx_adapter));
	}

	return ret;
}
int
rte_event_eth_rx_adapter_queue_add(uint8_t id,
		uint8_t eth_dev_id,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	uint32_t cap;
	struct rte_event_eth_rx_adapter *rx_adapter;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	int start_service;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = id_to_rx_adapter(id);
	if ((rx_adapter == NULL) || (queue_conf == NULL))
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
			"eth port %" PRIu8, id, eth_dev_id);
		return ret;
	}

	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
		&& (queue_conf->rx_queue_flags &
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
		RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
				" eth port: %" PRIu8 " adapter id: %" PRIu8,
				eth_dev_id, id);
		return -EINVAL;
	}

	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
		(rx_queue_id != -1)) {
		RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
			"event queue id %u eth port %u", id, eth_dev_id);
		return -EINVAL;
	}

	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
			rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
			 (uint16_t)rx_queue_id);
		return -EINVAL;
	}

	start_service = 0;
	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
					-ENOTSUP);
		if (dev_info->rx_queue == NULL) {
			dev_info->rx_queue =
			    rte_zmalloc_socket(rx_adapter->mem_name,
					dev_info->dev->data->nb_rx_queues *
					sizeof(struct eth_rx_queue_info), 0,
					rx_adapter->socket_id);
			if (dev_info->rx_queue == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
				&rte_eth_devices[eth_dev_id],
				rx_queue_id, queue_conf);
		if (ret == 0) {
			update_queue_info(rx_adapter,
					&rx_adapter->eth_devices[eth_dev_id],
					rx_queue_id,
					1);
		}
	} else {
		rte_spinlock_lock(&rx_adapter->rx_lock);
		ret = init_service(rx_adapter, id);
		if (ret == 0)
			ret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,
					queue_conf);
		rte_spinlock_unlock(&rx_adapter->rx_lock);
		if (ret == 0)
			start_service = !!sw_rx_adapter_queue_count(rx_adapter);
	}

	if (ret)
		return ret;

	if (start_service)
		rte_service_component_runstate_set(rx_adapter->service_id, 1);

	return 0;
}
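
A usage sketch for the queue add/delete pair above. The adapter is assumed to have been created with rte_event_eth_rx_adapter_create() beforehand; the adapter id, port id and event fields are illustrative values.

/* Sketch: connect every Rx queue of one port to the adapter (an
 * rx_queue_id of -1 means all queues; a specific queue id would
 * additionally require the MULTI_EVENTQ capability checked above). */
static int
attach_port_to_adapter(uint8_t adapter_id, uint8_t eth_port)
{
	struct rte_event_eth_rx_adapter_queue_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.ev.queue_id = 0;				/* target event queue */
	conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	conf.servicing_weight = 1;			/* weight on the SW poll path */

	return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port, -1,
			&conf);
}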