static const struct odp_mm_district *mm_district_reserve_aligned(
	const char *name, const char *orig_name, size_t len,
	int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct odp_sys_layout *mcfg;
	unsigned i = 0;
	int mmfrag_idx = -1;
	uint64_t addr_offset, seg_offset = 0;
	size_t requested_len;
	size_t mmfrag_len = 0;
	phys_addr_t mmfrag_physaddr;
	void *mmfrag_addr;
	struct odp_mm_district *md = NULL;

	/* get pointer to global configuration */
	mcfg = odp_get_configuration()->sys_layout;

	/* no more room in config */
	if (mcfg->mm_district_idx >= ODP_MAX_MM_DISTRICT) {
		ODP_ERR("%s: No more room in config\n", name);
		odp_err = ENOSPC;
		return NULL;
	}

	/* zone already exists */
	if (mm_district_lookup(name)) {
		ODP_ERR("mm_district <%s> already exists\n", name);
		odp_err = EEXIST;
		return NULL;
	}

	if (!orig_name) {
		ODP_ERR("Invalid param: orig_name\n");
		odp_err = EINVAL;
		return NULL;
	}

	/* reuse a previously freed district if it is large enough */
	md = free_mm_district_lookup(orig_name);
	if (md && len <= md->len) {
		free_mm_district_fetch(md);
		return md;
	}

	/* if alignment is not a power of two */
	if (align && !odp_is_power_of_2(align)) {
		ODP_ERR("Invalid alignment: %u\n", align);
		odp_err = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < ODP_CACHE_LINE_SIZE)
		align = ODP_CACHE_LINE_SIZE;

	/* align length on cache boundary. Check for overflow before doing so */
	if (len > MEM_SIZE_MAX - ODP_CACHE_LINE_MASK) {
		odp_err = EINVAL; /* requested size too big */
		return NULL;
	}

	len += ODP_CACHE_LINE_MASK;
	len &= ~((size_t)ODP_CACHE_LINE_MASK);

	/* save minimal requested length */
	requested_len = ODP_MAX((size_t)ODP_CACHE_LINE_SIZE, len);

	/* check that boundary condition is valid */
	if ((bound != 0) &&
	    ((requested_len > bound) || !odp_is_power_of_2(bound))) {
		odp_err = EINVAL;
		return NULL;
	}

	/* find the smallest segment matching requirements */
	for (i = 0; i < ODP_MAX_MMFRAG; i++) {
		/* last segment */
		if (!free_mmfrag[i].addr)
			break;

		/* empty segment, skip it */
		if (free_mmfrag[i].len == 0)
			continue;

		/* bad socket ID */
		if ((socket_id != SOCKET_ID_ANY) &&
		    (free_mmfrag[i].socket_id != SOCKET_ID_ANY) &&
		    (socket_id != free_mmfrag[i].socket_id))
			continue;

		/*
		 * calculate offset to closest alignment that
		 * meets boundary conditions.
		 */
		addr_offset = align_phys_boundary(free_mmfrag + i,
						  requested_len, align, bound);

		/* check len */
		if ((requested_len + addr_offset) > free_mmfrag[i].len)
			continue;

		/* check flags for hugepage sizes */
		if ((flags & ODP_MEMZONE_2MB) &&
		    (free_mmfrag[i].hugepage_sz == ODP_PGSIZE_1G))
			continue;

		if ((flags & ODP_MEMZONE_1GB) &&
		    (free_mmfrag[i].hugepage_sz == ODP_PGSIZE_2M))
			continue;

		if ((flags & ODP_MEMZONE_16MB) &&
		    (free_mmfrag[i].hugepage_sz == ODP_PGSIZE_16G))
			continue;

		if ((flags & ODP_MEMZONE_16GB) &&
		    (free_mmfrag[i].hugepage_sz == ODP_PGSIZE_16M))
			continue;

		/* this segment is the best until now */
		if (mmfrag_idx == -1) {
			mmfrag_idx = i;
			mmfrag_len = free_mmfrag[i].len;
			seg_offset = addr_offset;
		}
		/* find the biggest contiguous zone */
		else if (len == 0) {
			if (free_mmfrag[i].len > mmfrag_len) {
				mmfrag_idx = i;
				mmfrag_len = free_mmfrag[i].len;
				seg_offset = addr_offset;
			}
		}
		/*
		 * find the smallest (we already checked that the current
		 * zone length is >= len)
		 */
		else if ((free_mmfrag[i].len + align < mmfrag_len) ||
			 ((free_mmfrag[i].len <= mmfrag_len + align) &&
			  (addr_offset < seg_offset))) {
			mmfrag_idx = i;
			mmfrag_len = free_mmfrag[i].len;
			seg_offset = addr_offset;
		}
	}

	/* no segment found */
	if (mmfrag_idx == -1) {
		/*
		 * If the ODP_MEMZONE_SIZE_HINT_ONLY flag is specified,
		 * try allocating again without the hugepage-size flags;
		 * otherwise fail.
		 */
		if ((flags & ODP_MEMZONE_SIZE_HINT_ONLY) &&
		    ((flags & ODP_MEMZONE_1GB) || (flags & ODP_MEMZONE_2MB) ||
		     (flags & ODP_MEMZONE_16MB) || (flags & ODP_MEMZONE_16GB)))
			return mm_district_reserve_aligned(name, orig_name,
							   len, socket_id, 0,
							   align, bound);

		odp_err = ENOMEM;
		return NULL;
	}

	/* save aligned physical and virtual addresses */
	mmfrag_physaddr = free_mmfrag[mmfrag_idx].phys_addr + seg_offset;
	mmfrag_addr = ODP_PTR_ADD(free_mmfrag[mmfrag_idx].addr,
				  (uintptr_t)seg_offset);

	/* if we are looking for the biggest mm_district */
	if (len == 0) {
		if (bound == 0)
			requested_len = mmfrag_len - seg_offset;
		else
			requested_len = ODP_ALIGN_CEIL(mmfrag_physaddr + 1,
						       bound) - mmfrag_physaddr;
	}

	/* set length to correct value */
	len = (size_t)seg_offset + requested_len;

	/* update our internal state */
	free_mmfrag[mmfrag_idx].len -= len;
	free_mmfrag[mmfrag_idx].phys_addr += len;
	free_mmfrag[mmfrag_idx].addr =
		(char *)free_mmfrag[mmfrag_idx].addr + len;

	/* fill the zone in config */
	struct odp_mm_district *mz =
		&mcfg->mm_district[mcfg->mm_district_idx++];

	snprintf(mz->orig_name, sizeof(mz->orig_name), "%s", orig_name);
	snprintf(mz->name, sizeof(mz->name), "%s", name);
	mz->phys_addr = mmfrag_physaddr;
	mz->phys_addr_end = mmfrag_physaddr + requested_len;
	mz->excursion_addr = mmfrag_addr - mmfrag_physaddr;
	mz->addr = mmfrag_addr;
	mz->len = requested_len;
	mz->hugepage_sz = free_mmfrag[mmfrag_idx].hugepage_sz;
	mz->socket_id = free_mmfrag[mmfrag_idx].socket_id;
	mz->flags = 0;
	mz->mmfrag_id = mmfrag_idx;

	return mz;
}
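/*
 * Usage sketch (illustrative, not part of the original file): how a caller
 * in this translation unit might reserve a district through the static
 * function above. The caller name "example_reserve_ring_district" and the
 * district names are hypothetical; all flags and constants used below
 * appear in the function itself. With ODP_MEMZONE_SIZE_HINT_ONLY set, the
 * 2 MB hugepage request is only a hint: if no 2 MB-backed segment fits,
 * the function retries with flags cleared instead of failing with ENOMEM.
 */
static const struct odp_mm_district *example_reserve_ring_district(void)
{
	/* len = 0 would instead grab the biggest contiguous zone;
	 * here we ask for exactly 64 KB, cache-line aligned, on any socket. */
	return mm_district_reserve_aligned("example_ring", "example_ring",
					   64 * 1024, SOCKET_ID_ANY,
					   ODP_MEMZONE_2MB |
					   ODP_MEMZONE_SIZE_HINT_ONLY,
					   ODP_CACHE_LINE_SIZE, 0);
}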
static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
		int socket_id, unsigned flags, unsigned align, unsigned bound)
{
	struct rte_mem_config *mcfg;
	unsigned i = 0;
	int memseg_idx = -1;
	uint64_t addr_offset, seg_offset = 0;
	size_t requested_len;
	size_t memseg_len = 0;
	phys_addr_t memseg_physaddr;
	void *memseg_addr;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* no more room in config */
	if (mcfg->memzone_idx >= RTE_MAX_MEMZONE) {
		RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
		rte_errno = ENOSPC;
		return NULL;
	}

	/* zone already exists */
	if ((memzone_lookup_thread_unsafe(name)) != NULL) {
		RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
			__func__, name);
		rte_errno = EEXIST;
		return NULL;
	}

	/* if alignment is not a power of two */
	if (align && !rte_is_power_of_2(align)) {
		RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
			align);
		rte_errno = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < RTE_CACHE_LINE_SIZE)
		align = RTE_CACHE_LINE_SIZE;

	/* align length on cache boundary. Check for overflow before doing so */
	if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
		rte_errno = EINVAL; /* requested size too big */
		return NULL;
	}

	len += RTE_CACHE_LINE_MASK;
	len &= ~((size_t)RTE_CACHE_LINE_MASK);

	/* save minimal requested length */
	requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);

	/* check that boundary condition is valid */
	if (bound != 0 &&
	    (requested_len > bound || !rte_is_power_of_2(bound))) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* find the smallest segment matching requirements */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		/* last segment */
		if (free_memseg[i].addr == NULL)
			break;

		/* empty segment, skip it */
		if (free_memseg[i].len == 0)
			continue;

		/* bad socket ID */
		if (socket_id != SOCKET_ID_ANY &&
		    free_memseg[i].socket_id != SOCKET_ID_ANY &&
		    socket_id != free_memseg[i].socket_id)
			continue;

		/*
		 * calculate offset to closest alignment that
		 * meets boundary conditions.
		 */
		addr_offset = align_phys_boundary(free_memseg + i,
						  requested_len, align, bound);

		/* check len */
		if ((requested_len + addr_offset) > free_memseg[i].len)
			continue;

		/* check flags for hugepage sizes */
		if ((flags & RTE_MEMZONE_2MB) &&
		    free_memseg[i].hugepage_sz == RTE_PGSIZE_1G)
			continue;
		if ((flags & RTE_MEMZONE_1GB) &&
		    free_memseg[i].hugepage_sz == RTE_PGSIZE_2M)
			continue;
		if ((flags & RTE_MEMZONE_16MB) &&
		    free_memseg[i].hugepage_sz == RTE_PGSIZE_16G)
			continue;
		if ((flags & RTE_MEMZONE_16GB) &&
		    free_memseg[i].hugepage_sz == RTE_PGSIZE_16M)
			continue;

		/* this segment is the best until now */
		if (memseg_idx == -1) {
			memseg_idx = i;
			memseg_len = free_memseg[i].len;
			seg_offset = addr_offset;
		}
		/* find the biggest contiguous zone */
		else if (len == 0) {
			if (free_memseg[i].len > memseg_len) {
				memseg_idx = i;
				memseg_len = free_memseg[i].len;
				seg_offset = addr_offset;
			}
		}
		/*
		 * find the smallest (we already checked that the current
		 * zone length is >= len)
		 */
		else if (free_memseg[i].len + align < memseg_len ||
			 (free_memseg[i].len <= memseg_len + align &&
			  addr_offset < seg_offset)) {
			memseg_idx = i;
			memseg_len = free_memseg[i].len;
			seg_offset = addr_offset;
		}
	}

	/* no segment found */
	if (memseg_idx == -1) {
		/*
		 * If the RTE_MEMZONE_SIZE_HINT_ONLY flag is specified,
		 * try allocating again without the hugepage-size flags;
		 * otherwise fail.
		 */
		if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) &&
		    ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB) ||
		     (flags & RTE_MEMZONE_16MB) || (flags & RTE_MEMZONE_16GB)))
			return memzone_reserve_aligned_thread_unsafe(name,
				len, socket_id, 0, align, bound);

		rte_errno = ENOMEM;
		return NULL;
	}

	/* save aligned physical and virtual addresses */
	memseg_physaddr = free_memseg[memseg_idx].phys_addr + seg_offset;
	memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr,
				  (uintptr_t)seg_offset);

	/* if we are looking for the biggest memzone */
	if (len == 0) {
		if (bound == 0)
			requested_len = memseg_len - seg_offset;
		else
			requested_len = RTE_ALIGN_CEIL(memseg_physaddr + 1,
						       bound) - memseg_physaddr;
	}

	/* set length to correct value */
	len = (size_t)seg_offset + requested_len;

	/* update our internal state */
	free_memseg[memseg_idx].len -= len;
	free_memseg[memseg_idx].phys_addr += len;
	free_memseg[memseg_idx].addr =
		(char *)free_memseg[memseg_idx].addr + len;

	/* fill the zone in config */
	struct rte_memzone *mz = &mcfg->memzone[mcfg->memzone_idx++];

	snprintf(mz->name, sizeof(mz->name), "%s", name);
	mz->phys_addr = memseg_physaddr;
	mz->addr = memseg_addr;
	mz->len = requested_len;
	mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz;
	mz->socket_id = free_memseg[memseg_idx].socket_id;
	mz->flags = 0;
	mz->memseg_id = memseg_idx;

	return mz;
}
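/*
 * Thread-safety sketch (illustrative): the function above is deliberately
 * _thread_unsafe, since it reads and mutates free_memseg[] and the shared
 * memzone array without synchronization. In DPDK's EAL the public entry
 * points (rte_memzone_reserve_aligned, rte_memzone_reserve_bounded)
 * serialize callers around it. A minimal wrapper along those lines,
 * assuming the mem_config's mlock rwlock as in DPDK; the wrapper name
 * "example_memzone_reserve_bounded" is hypothetical:
 */
static const struct rte_memzone *
example_memzone_reserve_bounded(const char *name, size_t len, int socket_id,
				unsigned flags, unsigned align, unsigned bound)
{
	const struct rte_memzone *mz;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	/* take a write lock: reservation mutates shared allocator state */
	rte_rwlock_write_lock(&mcfg->mlock);
	mz = memzone_reserve_aligned_thread_unsafe(name, len, socket_id,
						   flags, align, bound);
	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}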