/*
 * Resize a buffer previously obtained from the rte_malloc family.
 *
 * Thin wrapper around rte_realloc(): on success, optionally reports the
 * physical address of the (possibly relocated) buffer through phys_addr.
 *
 * Returns the new virtual address, or NULL on failure (rte_realloc()
 * leaves the original buffer intact in that case).
 */
void *
spdk_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
{
	void *resized = rte_realloc(buf, size, align);

	if (resized != NULL && phys_addr != NULL) {
		*phys_addr = rte_malloc_virt2phy(resized);
	}

	return resized;
}
/*
 * Allocate a buffer from the rte_malloc heap.
 *
 * Thin wrapper around rte_malloc(): on success, optionally reports the
 * buffer's physical address through phys_addr.
 *
 * Returns the allocated virtual address, or NULL on failure.
 */
void *
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	void *mem = rte_malloc(NULL, size, align);

	if (mem != NULL && phys_addr != NULL) {
		*phys_addr = rte_malloc_virt2phy(mem);
	}

	return mem;
}
static const struct rte_memzone * memzone_reserve_aligned_thread_unsafe(const char *name, size_t len, int socket_id, unsigned flags, unsigned align, unsigned bound) { struct rte_memzone *mz; struct rte_mem_config *mcfg; size_t requested_len; int socket, i; /* get pointer to global configuration */ mcfg = rte_eal_get_configuration()->mem_config; /* no more room in config */ if (mcfg->memzone_cnt >= RTE_MAX_MEMZONE) { RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__); rte_errno = ENOSPC; return NULL; } /* zone already exist */ if ((memzone_lookup_thread_unsafe(name)) != NULL) { RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n", __func__, name); rte_errno = EEXIST; return NULL; } if (strlen(name) >= sizeof(mz->name) - 1) { RTE_LOG(DEBUG, EAL, "%s(): memzone <%s>: name too long\n", __func__, name); rte_errno = EEXIST; return NULL; } /* if alignment is not a power of two */ if (align && !rte_is_power_of_2(align)) { RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__, align); rte_errno = EINVAL; return NULL; } /* alignment less than cache size is not allowed */ if (align < RTE_CACHE_LINE_SIZE) align = RTE_CACHE_LINE_SIZE; /* align length on cache boundary. 
Check for overflow before doing so */ if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) { rte_errno = EINVAL; /* requested size too big */ return NULL; } len += RTE_CACHE_LINE_MASK; len &= ~((size_t) RTE_CACHE_LINE_MASK); /* save minimal requested length */ requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len); /* check that boundary condition is valid */ if (bound != 0 && (requested_len > bound || !rte_is_power_of_2(bound))) { rte_errno = EINVAL; return NULL; } if ((socket_id != SOCKET_ID_ANY) && (socket_id >= RTE_MAX_NUMA_NODES)) { rte_errno = EINVAL; return NULL; } if (!rte_eal_has_hugepages()) socket_id = SOCKET_ID_ANY; if (len == 0) { if (bound != 0) requested_len = bound; else { requested_len = find_heap_max_free_elem(&socket_id, align); if (requested_len == 0) { rte_errno = ENOMEM; return NULL; } } } if (socket_id == SOCKET_ID_ANY) socket = malloc_get_numa_socket(); else socket = socket_id; /* allocate memory on heap */ void *mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[socket], NULL, requested_len, flags, align, bound); if ((mz_addr == NULL) && (socket_id == SOCKET_ID_ANY)) { /* try other heaps */ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) { if (socket == i) continue; mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[i], NULL, requested_len, flags, align, bound); if (mz_addr != NULL) break; } } if (mz_addr == NULL) { rte_errno = ENOMEM; return NULL; } const struct malloc_elem *elem = malloc_elem_from_data(mz_addr); /* fill the zone in config */ mz = get_next_free_memzone(); if (mz == NULL) { RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone but there is room " "in config!\n", __func__); rte_errno = ENOSPC; return NULL; } mcfg->memzone_cnt++; snprintf(mz->name, sizeof(mz->name), "%s", name); mz->phys_addr = rte_malloc_virt2phy(mz_addr); mz->addr = mz_addr; mz->len = (requested_len == 0 ? 
elem->size : requested_len); mz->hugepage_sz = elem->ms->hugepage_sz; mz->socket_id = elem->ms->socket_id; mz->flags = 0; mz->memseg_id = elem->ms - rte_eal_get_configuration()->mem_config->memseg; return mz; }
static struct snobj *init_port(struct port *p, struct snobj *conf) { struct vport_priv *priv = get_port_priv(p); int container_pid = 0; int cpu; int rxq; int ret; if (strlen(p->name) >= IFNAMSIZ) return snobj_err(EINVAL, "Linux interface name should be " \ "shorter than %d characters", IFNAMSIZ); if (snobj_eval_exists(conf, "docker")) { struct snobj *err = docker_container_pid( snobj_eval_str(conf, "docker"), &container_pid); if (err) return err; } priv->fd = open("/dev/softnic", O_RDONLY); if (priv->fd == -1) return snobj_err(ENODEV, "the kernel module is not loaded"); priv->bar = alloc_bar(p, container_pid); ret = ioctl(priv->fd, SN_IOC_CREATE_HOSTNIC, rte_malloc_virt2phy(priv->bar)); if (ret < 0) { close(priv->fd); return snobj_errno_details(-ret, snobj_str("SN_IOC_CREATE_HOSTNIC failure")); } if (snobj_eval_exists(conf, "ip_addr")) { struct snobj *err = set_ip_addr(p, container_pid, snobj_eval(conf, "ip_addr")); if (err) { deinit_port(p); return err; } } for (cpu = 0; cpu < SN_MAX_CPU; cpu++) priv->map.cpu_to_txq[cpu] = cpu % p->num_queues[PACKET_DIR_INC]; cpu = 0; for (rxq = 0; rxq < p->num_queues[PACKET_DIR_OUT]; rxq++) { while (is_worker_core(cpu)) cpu = (cpu + 1) % sysconf(_SC_NPROCESSORS_ONLN); priv->map.rxq_to_cpu[rxq] = cpu; cpu = (cpu + 1) % sysconf(_SC_NPROCESSORS_ONLN); } ret = ioctl(priv->fd, SN_IOC_SET_QUEUE_MAPPING, &priv->map); if (ret < 0) perror("SN_IOC_SET_QUEUE_MAPPING"); return NULL; }
struct cperf_test_vector* cperf_test_vector_get_dummy(struct cperf_options *options) { struct cperf_test_vector *t_vec; t_vec = (struct cperf_test_vector *)rte_malloc(NULL, sizeof(struct cperf_test_vector), 0); if (t_vec == NULL) return t_vec; t_vec->plaintext.data = plaintext; t_vec->plaintext.length = options->buffer_sz; if (options->op_type == CPERF_CIPHER_ONLY || options->op_type == CPERF_CIPHER_THEN_AUTH || options->op_type == CPERF_AUTH_THEN_CIPHER || options->op_type == CPERF_AEAD) { if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL) { t_vec->cipher_key.length = -1; t_vec->ciphertext.data = plaintext; t_vec->cipher_key.data = NULL; t_vec->iv.data = NULL; } else { t_vec->cipher_key.length = options->cipher_key_sz; t_vec->ciphertext.data = ciphertext; t_vec->cipher_key.data = cipher_key; t_vec->iv.data = rte_malloc(NULL, options->cipher_iv_sz, 16); if (t_vec->iv.data == NULL) { rte_free(t_vec); return NULL; } memcpy(t_vec->iv.data, iv, options->cipher_iv_sz); } t_vec->ciphertext.length = options->buffer_sz; t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data); t_vec->iv.length = options->cipher_iv_sz; t_vec->data.cipher_offset = 0; t_vec->data.cipher_length = options->buffer_sz; } if (options->op_type == CPERF_AUTH_ONLY || options->op_type == CPERF_CIPHER_THEN_AUTH || options->op_type == CPERF_AUTH_THEN_CIPHER || options->op_type == CPERF_AEAD) { uint8_t aad_alloc = 0; t_vec->auth_key.length = options->auth_key_sz; switch (options->auth_algo) { case RTE_CRYPTO_AUTH_NULL: t_vec->auth_key.data = NULL; aad_alloc = 0; break; case RTE_CRYPTO_AUTH_AES_GCM: t_vec->auth_key.data = NULL; aad_alloc = 1; break; case RTE_CRYPTO_AUTH_SNOW3G_UIA2: case RTE_CRYPTO_AUTH_KASUMI_F9: case RTE_CRYPTO_AUTH_ZUC_EIA3: t_vec->auth_key.data = auth_key; aad_alloc = 1; break; case RTE_CRYPTO_AUTH_AES_GMAC: /* auth key should be the same as cipher key */ t_vec->auth_key.data = cipher_key; aad_alloc = 1; break; default: t_vec->auth_key.data = auth_key; aad_alloc = 0; break; } if 
(aad_alloc) { t_vec->aad.data = rte_malloc(NULL, options->auth_aad_sz, 16); if (t_vec->aad.data == NULL) { if (options->op_type != CPERF_AUTH_ONLY) rte_free(t_vec->iv.data); rte_free(t_vec); return NULL; } memcpy(t_vec->aad.data, aad, options->auth_aad_sz); } else { t_vec->aad.data = NULL; } t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data); t_vec->aad.length = options->auth_aad_sz; t_vec->digest.data = rte_malloc(NULL, options->auth_digest_sz, 16); if (t_vec->digest.data == NULL) { if (options->op_type != CPERF_AUTH_ONLY) rte_free(t_vec->iv.data); rte_free(t_vec->aad.data); rte_free(t_vec); return NULL; } t_vec->digest.phys_addr = rte_malloc_virt2phy(t_vec->digest.data); t_vec->digest.length = options->auth_digest_sz; memcpy(t_vec->digest.data, digest, options->auth_digest_sz); t_vec->data.auth_offset = 0; t_vec->data.auth_length = options->buffer_sz; } return t_vec; }