/**
 * \brief allocates a frame on a specific node
 *
 * \param dest      capref to store the frame
 * \param size      size of the frame to allocated
 * \param node      node on which the frame should be allocated
 * \param ret_size  returned size of the frame capability
 *
 * \returns SYS_ERR_OK on SUCCESS
 *          errval on FAILURE
 */
errval_t numa_frame_alloc_on_node(struct capref *dest,
                                  size_t size,
                                  nodeid_t node,
                                  size_t *ret_size)
{
    NUMA_DEBUG_ALLOC("allocating frame on node %" PRIuNODEID "\n", node);

    /* remember the caller's RAM affinity so it can be restored afterwards */
    uint64_t saved_base, saved_limit;
    ram_get_affinity(&saved_base, &saved_limit);

    if (node >= numa_topology.num_nodes) {
        return NUMA_ERR_NODEID_INVALID;
    }

    uint64_t base  = numa_node_base(node);
    uint64_t limit = base + numa_node_size(node, NULL);

    NUMA_DEBUG_ALLOC("setting affinity to 0x%" PRIx64 "..0x%" PRIx64 "\n",
                     base, limit);

    /* constrain RAM allocation to the node's physical range */
    ram_set_affinity(base, limit);

    errval_t err = frame_alloc(dest, size, ret_size);

    ram_set_affinity(saved_base, saved_limit);

    NUMA_DEBUG_ALLOC("restore affinity to 0x%" PRIx64 "..0x%" PRIx64 "\n",
                     saved_base, saved_limit);

    return err;
}
void alloc_local(void) { errval_t err; #ifndef __k1om__ uint64_t minbase, maxlimit; ram_get_affinity(&minbase, &maxlimit); ram_set_affinity(XPHI_BENCH_RAM_MINBASE, XPHI_BENCH_RAM_MAXLIMIT); #endif size_t alloced_size = 0; err = frame_alloc(&local_frame, XPHI_BENCH_MSG_FRAME_SIZE, &alloced_size); EXPECT_SUCCESS(err, "frame_alloc"); #ifndef __k1om__ ram_set_affinity(minbase, maxlimit); #endif struct frame_identity id; err = invoke_frame_identify(local_frame, &id); EXPECT_SUCCESS(err, "invoke_frame_identify"); local_base = id.base; local_frame_sz = alloced_size; debug_printf("alloc_local | Frame base: %016lx, size=%lx\n", id.base, 1UL << id.bits); err = vspace_map_one_frame(&local_buf, alloced_size, local_frame, NULL, NULL); EXPECT_SUCCESS(err, "vspace_map_one_frame"); }
/** * \brief initializes a dma descriptor ring and allocates memory for it * * \param ring the ring structure to initialize * \param size number of elements in the ring * * \returns SYS_ERR_OK on success * errval on error */ errval_t xeon_phi_dma_desc_ring_alloc(struct xdma_ring *ring, uint16_t size) { errval_t err; memset(ring, 0, sizeof(*ring)); assert(size < (XEON_PHI_DMA_DESC_RING_MAX)); assert(IS_POW2(size)); #ifndef __k1om__ /* * we set the ram affinity to the maximum range mapped by the system memory * page tables when being on the host. Otherwise the card cannot access it. */ uint64_t minbase, maxlimit; ram_get_affinity(&minbase, &maxlimit); ram_set_affinity(0, XEON_PHI_SYSMEM_SIZE-8*XEON_PHI_SYSMEM_PAGE_SIZE); #endif size_t frame_size = ((size_t) size) * XEON_PHI_DMA_DESC_SIZE; err = frame_alloc(&ring->cap, frame_size, NULL); #ifndef __k1om__ ram_set_affinity(minbase, maxlimit); #endif if (err_is_fail(err)) { return err; } err = vspace_map_one_frame_attr(&ring->vbase, frame_size, ring->cap, VREGION_FLAGS_READ_WRITE, NULL, NULL); if (err_is_fail(err)) { cap_destroy(ring->cap); return err; } struct frame_identity id; err = invoke_frame_identify(ring->cap, &id); assert(err_is_ok(err)); ring->pbase = id.base; ring->size = size; memset(ring->vbase, 0, frame_size); return SYS_ERR_OK; }
/**
 * Allocates memory for kernel binary.
 *
 * For x86, the app kernel can only be loaded in the first 4GB
 * of memory. Further, it must not overlap the integer
 * boundaries, i.e. 0-1, 1-2, 2-3, or 3-4.
 *
 * Probably because we identity map this region during boot-phase
 * so we can't access anything higher. Not sure about overlap tough.
 */
static errval_t allocate_kernel_memory(lvaddr_t cpu_binary,
                                       genpaddr_t page_size,
                                       struct capref* cpu_memory_cap,
                                       size_t* cpu_memory,
                                       struct frame_identity* id)
{
    errval_t err;

#ifdef __scc__
    *cpu_memory = X86_32_BASE_PAGE_SIZE;
    err = frame_alloc_identify(cpu_memory_cap, *cpu_memory, cpu_memory, id);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }
#else
    *cpu_memory = elf_virtual_size(cpu_binary) + page_size;

    /* remember the caller's affinity so it can be restored afterwards */
    uint64_t prev_min;
    uint64_t prev_max;
    ram_get_affinity(&prev_min, &prev_max);

    DEBUG("%s:%d: \n", __FILE__, __LINE__);

    /* try each 1GB window of the first 4GB in turn */
    const uint64_t window = (uint64_t)1 << 30;
    int found = 0;
    for (uint64_t lo = 0; lo < (uint64_t)4 << 30; lo += window) {
        ram_set_affinity(lo, lo + window);
        err = frame_alloc_identify(cpu_memory_cap, *cpu_memory, cpu_memory, id);
        if (err_is_ok(err)) {
            found = 1;
            break;
        }
    }

    if (!found) {
        USER_PANIC("No memory in the first 4GB, cannot continue booting cores");
    }

    ram_set_affinity(prev_min, prev_max);
#endif
    return SYS_ERR_OK;
}