static int
eal_parse_base_virtaddr(const char *arg)
{
	char *end;
	uint64_t addr;

	errno = 0;
	addr = strtoull(arg, &end, 16);

	/* check for errors */
	if ((errno != 0) || (arg[0] == '\0') || end == NULL || (*end != '\0'))
		return -1;

	/* make sure we don't exceed 32-bit boundary on 32-bit target */
#ifndef RTE_ARCH_X86_64
	if (addr >= UINTPTR_MAX)
		return -1;
#endif

	/* align the addr on 2M boundary */
	internal_config.base_virtaddr = RTE_PTR_ALIGN_CEIL((uintptr_t)addr,
			RTE_PGSIZE_2M);

	return 0;
}
static int
eal_parse_base_virtaddr(const char *arg)
{
	char *end;
	uint64_t addr;

	errno = 0;
	addr = strtoull(arg, &end, 16);

	/* check for errors */
	if ((errno != 0) || (arg[0] == '\0') || end == NULL || (*end != '\0'))
		return -1;

	/* make sure we don't exceed 32-bit boundary on 32-bit target */
#ifndef RTE_ARCH_64
	if (addr >= UINTPTR_MAX)
		return -1;
#endif

	/* align the addr on a 16MB boundary; 16MB is the minimum huge page
	 * size on IBM Power. An address aligned to 16MB is also 2MB-aligned,
	 * so the same alignment works for x86 as well. */
	internal_config.base_virtaddr = RTE_PTR_ALIGN_CEIL((uintptr_t)addr,
			(size_t)RTE_PGSIZE_16M);

	return 0;
}
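Both variants parse the hex string and then round the user-supplied base address up with RTE_PTR_ALIGN_CEIL. The following is a minimal standalone sketch of that rounding, assuming the standard power-of-two ceiling formula ((v + a - 1) & ~(a - 1)) that RTE_ALIGN_CEIL reduces to; the ALIGN_16M constant and the example address are illustrative, not taken from a real configuration.

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_16M ((uint64_t)1 << 24)	/* local stand-in for RTE_PGSIZE_16M */

/* ceiling-align v to the power-of-two boundary a */
static uint64_t
align_ceil(uint64_t v, uint64_t a)
{
	return (v + a - 1) & ~(a - 1);
}

int
main(void)
{
	const char *arg = "0x100200000";	/* hypothetical --base-virtaddr value */
	char *end;
	uint64_t addr;

	errno = 0;
	addr = strtoull(arg, &end, 16);
	if (errno != 0 || arg[0] == '\0' || *end != '\0')
		return 1;

	/* 0x100200000 is not 16MB-aligned, so it rounds up to 0x101000000 */
	printf("parsed 0x%" PRIx64 ", aligned 0x%" PRIx64 "\n",
	       addr, align_ceil(addr, ALIGN_16M));
	return 0;
}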
static struct tb_mem_block *
tb_pool(struct tb_mem_pool *pool, size_t sz)
{
	struct tb_mem_block *block;
	uint8_t *ptr;
	size_t size;

	size = sz + pool->alignment - 1;
	block = calloc(1, size + sizeof(*pool->block));
	if (block == NULL) {
		RTE_LOG(ERR, MALLOC, "%s(%zu) failed, currently allocated "
			"by pool: %zu bytes\n", __func__, sz, pool->alloc);
		return NULL;
	}

	block->pool = pool;
	block->next = pool->block;
	pool->block = block;
	pool->alloc += size;

	ptr = (uint8_t *)(block + 1);
	block->mem = RTE_PTR_ALIGN_CEIL(ptr, pool->alignment);
	block->size = size - (block->mem - ptr);

	return block;
}
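tb_pool() over-allocates by alignment - 1 bytes and then bumps the usable pointer up with RTE_PTR_ALIGN_CEIL, so block->mem is aligned regardless of what calloc() returned. Below is a minimal standalone sketch of that over-allocate-then-align pattern; the helper name and the sizes are hypothetical, not part of DPDK.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* ceiling-align a pointer to the power-of-two boundary a */
static void *
ptr_align_ceil(void *p, uintptr_t a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

int
main(void)
{
	const size_t want = 100;	/* bytes the caller asked for */
	const size_t align = 64;	/* required alignment */

	/* over-allocate so an aligned region of 'want' bytes always fits */
	uint8_t *raw = calloc(1, want + align - 1);
	if (raw == NULL)
		return 1;

	uint8_t *mem = ptr_align_ceil(raw, align);

	printf("raw=%p aligned=%p wasted=%zu bytes\n",
	       (void *)raw, (void *)mem, (size_t)(mem - raw));
	free(raw);
	return 0;
}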
/*
 * attempt to resize a malloc_elem by expanding into any free space
 * immediately after it in memory.
 */
int
malloc_elem_resize(struct malloc_elem *elem, size_t size)
{
	const size_t new_size = size + MALLOC_ELEM_OVERHEAD;
	/* if we request a smaller size, then always return ok */
	const size_t current_size = elem->size - elem->pad;
	if (current_size >= new_size)
		return 0;

	struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
	rte_spinlock_lock(&elem->heap->lock);
	if (next->state != ELEM_FREE)
		goto err_return;
	if (current_size + next->size < new_size)
		goto err_return;

	/* we now know the element fits, so remove from free list,
	 * join the two
	 */
	elem_free_list_remove(next);
	join_elem(elem, next);

	if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
		/* now we have a big block together. Let's cut it down a bit by splitting */
		struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
		split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
		split_elem(elem, split_pt);
		malloc_elem_free_list_insert(split_pt);
	}
	rte_spinlock_unlock(&elem->heap->lock);
	return 0;

err_return:
	rte_spinlock_unlock(&elem->heap->lock);
	return -1;
}
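The split above only happens when the merged element leaves at least MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD bytes beyond the new size, and the split point is rounded up to a cache line so the trailing free element stays cache-aligned. The following standalone sketch walks through that decision numerically; the 64-byte cache line, header size, and minimum data size are assumptions for illustration, not values taken from the DPDK build.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE	64	/* assumed RTE_CACHE_LINE_SIZE */
#define ELEM_OVERHEAD	64	/* assumed malloc element header size */
#define MIN_DATA	64	/* assumed MIN_DATA_SIZE */

int
main(void)
{
	uintptr_t elem = 0x1000;	/* element start (already aligned) */
	size_t elem_size = 1024;	/* size after joining the free neighbour */
	size_t new_size = 300 + ELEM_OVERHEAD;	/* caller asked for 300 bytes */

	if (elem_size - new_size >= MIN_DATA + ELEM_OVERHEAD) {
		/* candidate split point, then cache-line ceiling alignment */
		uintptr_t split = elem + new_size;
		split = (split + CACHE_LINE - 1) & ~(uintptr_t)(CACHE_LINE - 1);
		printf("split at 0x%" PRIxPTR ", trailing free elem %zu bytes\n",
		       split, (size_t)(elem + elem_size - split));
	} else {
		printf("remainder too small, keep the whole element\n");
	}
	return 0;
}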
static int
test_align(void)
{
#define FAIL_ALIGN(x, i, p) \
	{printf(x "() test failed: %u %u\n", i, p); \
	return -1;}
#define ERROR_FLOOR(res, i, pow) \
	(res % pow) ||					/* check if not aligned */ \
	((res / pow) != (i / pow))			/* check if correct alignment */
#define ERROR_CEIL(res, i, pow) \
	(res % pow) ||					/* check if not aligned */ \
	((i % pow) == 0 ?				/* check if ceiling is invoked */ \
	res / pow != i / pow :				/* if aligned */ \
	res / pow != (i / pow) + 1)			/* if not aligned, hence +1 */

	uint32_t i, p, val;

	for (i = 1, p = 1; i <= MAX_NUM; i++) {
		if (rte_align32pow2(i) != p)
			FAIL_ALIGN("rte_align32pow2", i, p);
		if (i == p)
			p <<= 1;
	}

	for (p = 2; p <= MAX_NUM; p <<= 1) {

		if (!rte_is_power_of_2(p))
			FAIL("rte_is_power_of_2");

		for (i = 1; i <= MAX_NUM; i++) {
			/* align floor */
			if (RTE_ALIGN_FLOOR((uintptr_t)i, p) % p)
				FAIL_ALIGN("RTE_ALIGN_FLOOR", i, p);

			val = RTE_PTR_ALIGN_FLOOR((uintptr_t) i, p);
			if (ERROR_FLOOR(val, i, p))
				FAIL_ALIGN("RTE_PTR_ALIGN_FLOOR", i, p);

			val = RTE_ALIGN_FLOOR(i, p);
			if (ERROR_FLOOR(val, i, p))
				FAIL_ALIGN("RTE_ALIGN_FLOOR", i, p);

			/* align ceiling */
			val = RTE_PTR_ALIGN((uintptr_t) i, p);
			if (ERROR_CEIL(val, i, p))
				FAIL_ALIGN("RTE_PTR_ALIGN", i, p);

			val = RTE_ALIGN(i, p);
			if (ERROR_CEIL(val, i, p))
				FAIL_ALIGN("RTE_ALIGN", i, p);

			val = RTE_ALIGN_CEIL(i, p);
			if (ERROR_CEIL(val, i, p))
				FAIL_ALIGN("RTE_ALIGN_CEIL", i, p);

			val = RTE_PTR_ALIGN_CEIL((uintptr_t)i, p);
			if (ERROR_CEIL(val, i, p))
				FAIL_ALIGN("RTE_PTR_ALIGN_CEIL", i, p);

			/* by this point we know that val is aligned to p */
			if (!rte_is_aligned((void *)(uintptr_t)val, p))
				FAIL("rte_is_aligned");
		}
	}
	return 0;
}
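For a concrete feel of what the floor/ceiling checks above expect, here is a small standalone sketch using the usual power-of-two formulas; the ALIGN_FLOOR/ALIGN_CEIL macros are local stand-ins that mirror the RTE_ ones, not the DPDK definitions themselves.

#include <stdint.h>
#include <stdio.h>

/* local stand-ins for RTE_ALIGN_FLOOR / RTE_ALIGN_CEIL with power-of-two p */
#define ALIGN_FLOOR(v, p)	((v) & ~((p) - 1))
#define ALIGN_CEIL(v, p)	(((v) + (p) - 1) & ~((p) - 1))

int
main(void)
{
	uint32_t i = 13, p = 8;

	/* floor rounds down to the previous multiple, ceil up to the next:
	 * ALIGN_FLOOR(13, 8) == 8, ALIGN_CEIL(13, 8) == 16,
	 * and an already-aligned value is returned unchanged. */
	printf("floor(%u, %u) = %u\n", i, p, ALIGN_FLOOR(i, p));
	printf("ceil(%u, %u)  = %u\n", i, p, ALIGN_CEIL(i, p));
	printf("ceil(%u, %u)  = %u\n", 16u, p, ALIGN_CEIL(16u, p));
	return 0;
}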
static int
test_memzone_reserve_max_aligned(void)
{
	const struct rte_memzone *mz;
	const struct rte_config *config;
	const struct rte_memseg *ms;
	int memseg_idx = 0;
	int memzone_idx = 0;
	uintptr_t addr_offset;
	size_t len = 0;
	void *last_addr;
	size_t maxlen = 0;

	/* random alignment */
	rte_srand((unsigned)rte_rdtsc());
	const unsigned align = 1 << ((rte_rand() % 8) + 5); /* from 128 up to 4k alignment */

	/* get pointer to global configuration */
	config = rte_eal_get_configuration();

	ms = rte_eal_get_physmem_layout();

	addr_offset = 0;

	for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++) {

		/* ignore smaller memsegs as they can only get smaller */
		if (ms[memseg_idx].len < maxlen)
			continue;

		/* align everything */
		last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, RTE_CACHE_LINE_SIZE);
		len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
		len &= ~((size_t) RTE_CACHE_LINE_MASK);

		/* cycle through all memzones */
		for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {

			/* stop when reaching last allocated memzone */
			if (config->mem_config->memzone[memzone_idx].addr == NULL)
				break;

			/* check if the memzone is in our memseg and subtract length */
			if ((config->mem_config->memzone[memzone_idx].addr >=
					ms[memseg_idx].addr) &&
					(config->mem_config->memzone[memzone_idx].addr <
					(RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
				/* since the zones can now be aligned and occasionally skip
				 * some space, we should calculate the length based on
				 * reported length and start addresses difference.
				 */
				len -= (uintptr_t) RTE_PTR_SUB(
						config->mem_config->memzone[memzone_idx].addr,
						(uintptr_t) last_addr);
				len -= config->mem_config->memzone[memzone_idx].len;
				last_addr = RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
						(size_t) config->mem_config->memzone[memzone_idx].len);
			}
		}

		/* make sure we get the alignment offset */
		if (len > maxlen) {
			addr_offset = RTE_PTR_ALIGN_CEIL((uintptr_t) last_addr, align) -
					(uintptr_t) last_addr;
			maxlen = len;
		}
	}

	if (maxlen == 0 || maxlen == addr_offset) {
		printf("There is no space left for biggest %u-aligned memzone!\n", align);
		return 0;
	}

	maxlen -= addr_offset;

	mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
			SOCKET_ID_ANY, 0, align);
	if (mz == NULL) {
		printf("Failed to reserve a big chunk of memory\n");
		rte_dump_physmem_layout(stdout);
		rte_memzone_dump(stdout);
		return -1;
	}

	if (mz->len != maxlen) {
		printf("Memzone reserve with 0 size and alignment %u did not return"
				" biggest block\n", align);
		printf("Expected size = %zu, actual size = %zu\n", maxlen, mz->len);
		rte_dump_physmem_layout(stdout);
		rte_memzone_dump(stdout);
		return -1;
	}
	return 0;
}
static int
test_memzone_reserve_max(void)
{
	const struct rte_memzone *mz;
	const struct rte_config *config;
	const struct rte_memseg *ms;
	int memseg_idx = 0;
	int memzone_idx = 0;
	size_t len = 0;
	void *last_addr;
	size_t maxlen = 0;

	/* get pointer to global configuration */
	config = rte_eal_get_configuration();

	ms = rte_eal_get_physmem_layout();

	for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++) {

		/* ignore smaller memsegs as they can only get smaller */
		if (ms[memseg_idx].len < maxlen)
			continue;

		/* align everything */
		last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, RTE_CACHE_LINE_SIZE);
		len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
		len &= ~((size_t) RTE_CACHE_LINE_MASK);

		/* cycle through all memzones */
		for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {

			/* stop when reaching last allocated memzone */
			if (config->mem_config->memzone[memzone_idx].addr == NULL)
				break;

			/* check if the memzone is in our memseg and subtract length */
			if ((config->mem_config->memzone[memzone_idx].addr >=
					ms[memseg_idx].addr) &&
					(config->mem_config->memzone[memzone_idx].addr <
					(RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
				/* since the zones can now be aligned and occasionally skip
				 * some space, we should calculate the length based on
				 * reported length and start addresses difference. Addresses
				 * are allocated sequentially so we don't need to worry about
				 * them being in the right order.
				 */
				len -= RTE_PTR_DIFF(
						config->mem_config->memzone[memzone_idx].addr,
						last_addr);
				len -= config->mem_config->memzone[memzone_idx].len;
				last_addr = RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
						(size_t) config->mem_config->memzone[memzone_idx].len);
			}
		}

		/* we don't need to calculate offset here since length
		 * is always cache-aligned
		 */
		if (len > maxlen)
			maxlen = len;
	}

	if (maxlen == 0) {
		printf("There is no space left!\n");
		return 0;
	}

	mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0);
	if (mz == NULL) {
		printf("Failed to reserve a big chunk of memory\n");
		rte_dump_physmem_layout(stdout);
		rte_memzone_dump(stdout);
		return -1;
	}

	if (mz->len != maxlen) {
		printf("Memzone reserve with 0 size did not return biggest block\n");
		printf("Expected size = %zu, actual size = %zu\n", maxlen, mz->len);
		rte_dump_physmem_layout(stdout);
		rte_memzone_dump(stdout);
		return -1;
	}
	return 0;
}
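Both memzone tests start from the same arithmetic: round the segment start up to a cache line, shrink the length by the bytes skipped, and mask the length down to a whole number of cache lines. The following standalone sketch shows only that trimming step, with a 64-byte cache line and an arbitrary segment assumed for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE_SIZE	64			/* assumed cache line size */
#define CACHE_LINE_MASK	(CACHE_LINE_SIZE - 1)

int
main(void)
{
	/* a segment that starts off a cache-line boundary */
	uintptr_t seg_addr = 0x100010;		/* not 64-byte aligned */
	size_t seg_len = 1000;

	/* round the start up, subtract the skipped bytes from the length,
	 * then trim the length down to a multiple of the cache line */
	uintptr_t start = (seg_addr + CACHE_LINE_MASK) & ~(uintptr_t)CACHE_LINE_MASK;
	size_t len = seg_len - (start - seg_addr);
	len &= ~(size_t)CACHE_LINE_MASK;

	printf("usable region: start=0x%" PRIxPTR " len=%zu\n", start, len);
	return 0;
}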