// Callback from device manager
static void idc_queue_terminated(struct e10k_binding *b)
{
    errval_t err;

    INITDEBUG("idc_queue_terminated()\n");

    // Free memory for hardware ring buffers
    err = vspace_unmap(q->tx_ring);
    assert(err_is_ok(err));
    err = vspace_unmap(q->rx_ring);
    assert(err_is_ok(err));
    err = cap_delete(tx_frame);
    assert(err_is_ok(err));
    err = cap_delete(rx_frame);
    assert(err_is_ok(err));

    // The TX head writeback frame is optional
    if (!capref_is_null(txhwb_frame)) {
        err = vspace_unmap(q->tx_hwb);
        assert(err_is_ok(err));
        err = cap_delete(txhwb_frame);
        assert(err_is_ok(err));
    }

    exit(0);
}
static errval_t mp_create(struct descq_binding* b, uint32_t slots,
                          struct capref rx, struct capref tx,
                          bool notifications, uint8_t role,
                          errval_t *err, uint64_t *queue_id)
{
    struct descq* q = (struct descq*) b->st;
    DESCQ_DEBUG("start %p\n", q);

    // switch RX/TX for correct setup: the other side's TX ring is our RX ring
    *err = vspace_map_one_frame_attr((void**) &(q->rx_descs),
                                     slots*DESCQ_ALIGNMENT, tx,
                                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
    if (err_is_fail(*err)) {
        goto end2;
    }

    *err = vspace_map_one_frame_attr((void**) &(q->tx_descs),
                                     slots*DESCQ_ALIGNMENT, rx,
                                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
    if (err_is_fail(*err)) {
        goto end1;
    }

    // the first slot of each ring holds the sequence-number ack header
    q->tx_seq_ack = (void*)q->tx_descs;
    q->rx_seq_ack = (void*)q->rx_descs;
    q->tx_descs++;
    q->rx_descs++;
    q->slots = slots-1;
    q->rx_seq = 1;
    q->tx_seq = 1;

    devq_init(&q->q, true);

    q->q.f.enq = descq_enqueue;
    q->q.f.deq = descq_dequeue;
    q->q.f.notify = descq_notify;
    q->q.f.reg = descq_register;
    q->q.f.dereg = descq_deregister;
    q->q.f.ctrl = descq_control;
    q->q.f.destroy = descq_destroy;

    notificator_init(&q->notificator, q, descq_can_read, descq_can_write);
    *err = waitset_chan_register(get_default_waitset(),
                                 &q->notificator.ready_to_read,
                                 MKCLOSURE(mp_notify, q));
    assert(err_is_ok(*err));

    *err = q->f.create(q, notifications, role, queue_id);
    if (err_is_ok(*err)) {
        goto end2;
    }

    // creation failed: also unmap the tx ring (its base is one slot before
    // the advanced descriptor pointer), then fall through to unmap rx
    vspace_unmap(q->tx_descs - 1);
    q->rx_descs--;
end1:
    // use a local so a successful unmap does not clobber the error in *err
    {
        errval_t cleanup_err = vspace_unmap(q->rx_descs);
        assert(err_is_ok(cleanup_err));
    }
end2:
    DESCQ_DEBUG("end\n");
    return SYS_ERR_OK;
}
/**
 * @brief Destroys a descriptor queue and frees its resources
 *
 * @param que The descriptor queue
 *
 * @returns error on failure or SYS_ERR_OK on success
 */
static errval_t descq_destroy(struct devq* que)
{
    errval_t err;

    struct descq* q = (struct descq*) que;

    err = vspace_unmap(q->tx_descs);
    if (err_is_fail(err)) {
        return err;
    }

    err = vspace_unmap(q->rx_descs);
    if (err_is_fail(err)) {
        return err;
    }

    free(q->name);
    free(q);

    return SYS_ERR_OK;
}
static void terminate_queue(struct net_queue_manager_binding *cc)
{
    errval_t err;
    struct buffer_descriptor *buffer;

    // Free all registered buffers
    for (buffer = buffers_list; buffer != NULL; buffer = buffer->next) {
        err = vspace_unmap(buffer->va);
        assert(err_is_ok(err));
        err = cap_delete(buffer->cap);
        assert(err_is_ok(err));
    }

    assert(ether_terminate_queue_ptr != NULL);
    ether_terminate_queue_ptr();
}
/**
 * \brief frees up the resources used by the ring.
 *
 * \param ring the descriptor ring to be freed
 *
 * \returns SYS_ERR_OK on success
 */
errval_t xeon_phi_dma_desc_ring_free(struct xdma_ring *ring)
{
    errval_t err;

    if (capref_is_null(ring->cap)) {
        return SYS_ERR_OK;
    }

    if (ring->vbase) {
        err = vspace_unmap(ring->vbase);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "unmapping the ring buffer failed\n");
        }
    }

    err = cap_revoke(ring->cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "revocation of ring cap failed\n");
    }

    return cap_destroy(ring->cap);
}
/**
 * \brief tries to free the allocated memory region
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t dma_mem_free(struct dma_mem *mem)
{
    errval_t err;

    if (mem->vaddr) {
        err = vspace_unmap((void*)mem->vaddr);
        if (err_is_fail(err)) {
            /* TODO: error handling; ignored for now */
        }
    }

    if (!capref_is_null(mem->frame)) {
        err = cap_destroy(mem->frame);
        if (err_is_fail(err)) {
            /* TODO: error handling; ignored for now */
        }
    }

    memset(mem, 0, sizeof(*mem));

    return SYS_ERR_OK;
}
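/*
 * Hedged sketch (not part of the original driver): the allocation path
 * that dma_mem_free() above tears down, shown for context. The fields
 * (frame, vaddr, paddr, bytes) mirror those used by dma_mem_free; the
 * name dma_mem_alloc_sketch and the uncached mapping flag are
 * illustrative assumptions, not the driver's actual allocator.
 */
static errval_t dma_mem_alloc_sketch(size_t bytes, struct dma_mem *mem)
{
    errval_t err;
    struct frame_identity id;

    // allocate a frame large enough for the DMA buffer and identify it
    err = frame_alloc_identify(&mem->frame, bytes, &mem->bytes, &id);
    if (err_is_fail(err)) {
        return err;
    }
    mem->paddr = id.base;

    // map it uncached so device writes are visible without cache flushes
    void *addr;
    err = vspace_map_one_frame_attr(&addr, mem->bytes, mem->frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        cap_destroy(mem->frame);
        return err;
    }
    mem->vaddr = (lvaddr_t)addr;

    return SYS_ERR_OK;
}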
errval_t spawn_xcore_monitor(coreid_t coreid, int hwid,
                             enum cpu_type cpu_type,
                             const char *cmdline,
                             struct frame_identity urpc_frame_id,
                             struct capref kcb)
{
    uint64_t start = 0;
    const char *monitorname = NULL, *cpuname = NULL;
    genpaddr_t arch_page_size;
    errval_t err;

    err = get_architecture_config(cpu_type, &arch_page_size,
                                  &monitorname, &cpuname);
    assert(err_is_ok(err));

    DEBUG("loading kernel: %s\n", cpuname);
    DEBUG("loading 1st app: %s\n", monitorname);

    // compute size of frame needed and allocate it
    DEBUG("%s:%s:%d: urpc_frame_id.base=%"PRIxGENPADDR"\n",
          __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.base);
    DEBUG("%s:%s:%d: urpc_frame_id.bits=%d\n",
          __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.bits);

    if (benchmark_flag) {
        start = bench_tsc();
    }

    static size_t cpu_binary_size;
    static lvaddr_t cpu_binary = 0;
    static genpaddr_t cpu_binary_phys;
    static const char* cached_cpuname = NULL;
    if (cpu_binary == 0) {
        cached_cpuname = cpuname;
        // XXX: Caching these for now, until we have unmap
        err = lookup_module(cpuname, &cpu_binary,
                            &cpu_binary_phys, &cpu_binary_size);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "Can not lookup module");
            return err;
        }
    }
    // Ensure caching actually works and we're always loading the same
    // binary. If this starts to fail, get rid of the caching.
    assert(strcmp(cached_cpuname, cpuname) == 0);

    static size_t monitor_binary_size;
    static lvaddr_t monitor_binary = 0;
    static genpaddr_t monitor_binary_phys;
    static const char* cached_monitorname = NULL;
    if (monitor_binary == 0) {
        cached_monitorname = monitorname;
        // XXX: Caching these for now, until we have unmap
        err = lookup_module(monitorname, &monitor_binary,
                            &monitor_binary_phys, &monitor_binary_size);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "Can not lookup module");
            return err;
        }
    }
    // Again, ensure caching actually worked (see above)
    assert(strcmp(cached_monitorname, monitorname) == 0);

    if (benchmark_flag) {
        bench_data->load = bench_tsc() - start;
        start = bench_tsc();
    }

    struct capref cpu_memory_cap;
    struct frame_identity frameid;
    size_t cpu_memory;
    err = allocate_kernel_memory(cpu_binary, arch_page_size,
                                 &cpu_memory_cap, &cpu_memory, &frameid);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not allocate space for new app kernel.");
        return err;
    }

    err = cap_mark_remote(cpu_memory_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not mark cap remote.");
        return err;
    }

    void *cpu_buf_memory;
    err = vspace_map_one_frame(&cpu_buf_memory, cpu_memory,
                               cpu_memory_cap, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    if (benchmark_flag) {
        bench_data->alloc_cpu = bench_tsc() - start;
        start = bench_tsc();
    }

    /* Chunk of memory to load monitor on the app core */
    struct capref spawn_memory_cap;
    struct frame_identity spawn_memory_identity;
    err = frame_alloc_identify(&spawn_memory_cap,
                               X86_CORE_DATA_PAGES * arch_page_size,
                               NULL, &spawn_memory_identity);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }

    err = cap_mark_remote(spawn_memory_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not mark cap remote.");
        return err;
    }

    if (benchmark_flag) {
        bench_data->alloc_mon = bench_tsc() - start;
        start = bench_tsc();
    }

    /* Load cpu */
    struct elf_allocate_state state;
    state.vbase = (char *)cpu_buf_memory + arch_page_size;
    assert(sizeof(struct x86_core_data) <= arch_page_size);
    state.elfbase = elf_virtual_base(cpu_binary);

    struct Elf64_Ehdr *cpu_head = (struct Elf64_Ehdr *)cpu_binary;
    genvaddr_t cpu_entry;

    err = elf_load(cpu_head->e_machine, elfload_allocate, &state,
                   cpu_binary, cpu_binary_size, &cpu_entry);
    if (err_is_fail(err)) {
        return err;
    }

    if (benchmark_flag) {
        bench_data->elf_load = bench_tsc() - start;
        start = bench_tsc();
    }

    err = relocate_cpu_binary(cpu_binary, cpu_head, state,
                              frameid, arch_page_size);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Can not relocate new kernel.");
        return err;
    }

    if (benchmark_flag) {
        bench_data->elf_reloc = bench_tsc() - start;
    }

    genvaddr_t cpu_reloc_entry = cpu_entry - state.elfbase
                                 + frameid.base + arch_page_size;
    /* Compute entry point in the foreign address space */
    forvaddr_t foreign_cpu_reloc_entry = (forvaddr_t)cpu_reloc_entry;

    /* Setup the core_data struct in the new kernel */
    struct x86_core_data *core_data = (struct x86_core_data *)cpu_buf_memory;
    switch (cpu_head->e_machine) {
    case EM_X86_64:
    case EM_K1OM:
        core_data->elf.size = sizeof(struct Elf64_Shdr);
        core_data->elf.addr = cpu_binary_phys + (uintptr_t)cpu_head->e_shoff;
        core_data->elf.num  = cpu_head->e_shnum;
        break;
    case EM_386:
        core_data->elf.size = sizeof(struct Elf32_Shdr);
        struct Elf32_Ehdr *head32 = (struct Elf32_Ehdr *)cpu_binary;
        core_data->elf.addr = cpu_binary_phys + (uintptr_t)head32->e_shoff;
        core_data->elf.num  = head32->e_shnum;
        break;
    default:
        return SPAWN_ERR_UNKNOWN_TARGET_ARCH;
    }

    core_data->module_start        = cpu_binary_phys;
    core_data->module_end          = cpu_binary_phys + cpu_binary_size;
    core_data->urpc_frame_base     = urpc_frame_id.base;
    core_data->urpc_frame_bits     = urpc_frame_id.bits;
    core_data->monitor_binary      = monitor_binary_phys;
    core_data->monitor_binary_size = monitor_binary_size;
    core_data->memory_base_start   = spawn_memory_identity.base;
    core_data->memory_bits         = spawn_memory_identity.bits;
    core_data->src_core_id         = disp_get_core_id();
    core_data->src_arch_id         = my_arch_id;
    core_data->dst_core_id         = coreid;

    struct frame_identity fid;
    err = invoke_frame_identify(kcb, &fid);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Invoke frame identity for KCB failed. "
                            "Did you add the syscall handler for that architecture?");
    }
    DEBUG("%s:%s:%d: fid.base is 0x%"PRIxGENPADDR"\n",
          __FILE__, __FUNCTION__, __LINE__, fid.base);
    core_data->kcb = (genpaddr_t) fid.base;
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
    core_data->chan_id             = chanid;
#endif

    if (cmdline != NULL) {
        // copy as much of the command line as will fit
        snprintf(core_data->kernel_cmdline, sizeof(core_data->kernel_cmdline),
                 "%s %s", cpuname, cmdline);
        // ensure termination
        core_data->kernel_cmdline[sizeof(core_data->kernel_cmdline) - 1] = '\0';

        DEBUG("%s:%s:%d: %s\n",
              __FILE__, __FUNCTION__, __LINE__, core_data->kernel_cmdline);
    }

    /* Invoke kernel capability to boot new core */
    if (cpu_type == CPU_X86_64 || cpu_type == CPU_K1OM) {
        start_aps_x86_64_start(hwid, foreign_cpu_reloc_entry);
    }
#ifndef __k1om__
    else if (cpu_type == CPU_X86_32) {
        start_aps_x86_32_start(hwid, foreign_cpu_reloc_entry);
    }
#endif

    /* Clean up */
    // XXX: Should not delete the remote caps?
    err = cap_destroy(spawn_memory_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");
    }
    err = vspace_unmap(cpu_buf_memory);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "vspace unmap CPU driver memory failed");
    }
    err = cap_destroy(cpu_memory_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");
    }

    return SYS_ERR_OK;
}
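/*
 * Hedged usage sketch (not from the source): bringing up an application
 * core with a freshly allocated URPC frame. boot_app_core() and its
 * arguments are illustrative; MON_URPC_SIZE is assumed to be the usual
 * Barrelfish constant for the monitor URPC channel, and kcb_cap is
 * assumed to come from the KCB allocation path elsewhere in corectrl.
 */
static errval_t boot_app_core(coreid_t target, int hwid, struct capref kcb_cap)
{
    struct capref urpc_frame;
    struct frame_identity urpc_id;

    // allocate the cross-core URPC frame and learn its physical identity
    errval_t err = frame_alloc_identify(&urpc_frame, MON_URPC_SIZE,
                                        NULL, &urpc_id);
    if (err_is_fail(err)) {
        return err;
    }

    // load, relocate and start the CPU driver and monitor on the new core
    return spawn_xcore_monitor(target, hwid, CPU_X86_64,
                               "loglevel=4", urpc_id, kcb_cap);
}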
/**
 * \brief initializes an IOAT DMA device with the given capability
 *
 * \param mmio capability representing the device's MMIO registers
 * \param dev  returns a pointer to the device structure
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t ioat_dma_device_init(struct capref mmio,
                              struct ioat_dma_device **dev)
{
    errval_t err;

    struct ioat_dma_device *ioat_device = calloc(1, sizeof(*ioat_device));
    if (ioat_device == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

#if DMA_BENCH_ENABLED
    bench_init();
#endif

    struct dma_device *dma_dev = &ioat_device->common;

    struct frame_identity mmio_id;
    err = invoke_frame_identify(mmio, &mmio_id);
    if (err_is_fail(err)) {
        free(ioat_device);
        return err;
    }

    dma_dev->id = device_id++;
    dma_dev->mmio.paddr = mmio_id.base;
    dma_dev->mmio.bytes = (1UL << mmio_id.bits);
    dma_dev->mmio.frame = mmio;

    IOATDEV_DEBUG("init device with mmio range: {paddr=0x%016lx, size=%lu bytes}\n",
                  dma_dev->id, mmio_id.base, (1UL << mmio_id.bits));

    err = vspace_map_one_frame_attr((void**) &dma_dev->mmio.vaddr,
                                    dma_dev->mmio.bytes, dma_dev->mmio.frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        free(ioat_device);
        return err;
    }

    ioat_dma_initialize(&ioat_device->device, NULL, (void *) dma_dev->mmio.vaddr);

    ioat_device->version = ioat_dma_cbver_rd(&ioat_device->device);

    IOATDEV_DEBUG("device registers mapped at 0x%016lx. IOAT version: %u.%u\n",
                  dma_dev->id, dma_dev->mmio.vaddr,
                  ioat_dma_cbver_major_extract(ioat_device->version),
                  ioat_dma_cbver_minor_extract(ioat_device->version));

    switch (ioat_dma_cbver_major_extract(ioat_device->version)) {
        case ioat_dma_cbver_1x:
            err = device_init_ioat_v1(ioat_device);
            break;
        case ioat_dma_cbver_2x:
            err = device_init_ioat_v2(ioat_device);
            break;
        case ioat_dma_cbver_3x:
            err = device_init_ioat_v3(ioat_device);
            break;
        default:
            err = DMA_ERR_DEVICE_UNSUPPORTED;
    }

    if (err_is_fail(err)) {
        vspace_unmap((void*) dma_dev->mmio.vaddr);
        free(ioat_device);
        return err;
    }

    dma_dev->f.deregister_memory = NULL;
    dma_dev->f.register_memory = NULL;
    dma_dev->f.poll = ioat_dma_device_poll_channels;

    *dev = ioat_device;

    return err;
}
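/*
 * Hedged usage sketch (not from the source): initializing an IOAT device
 * from its BAR capability and polling its channels once. How the caller
 * obtains bar0 (typically via libpci) is outside this snippet; the
 * function name ioat_bringup_example is illustrative.
 */
static errval_t ioat_bringup_example(struct capref bar0)
{
    struct ioat_dma_device *dev;

    errval_t err = ioat_dma_device_init(bar0, &dev);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "IOAT device initialization failed");
        return err;
    }

    // poll the channels through the generic handle, i.e. the embedded
    // struct dma_device (field `common` in the listing above)
    return ioat_dma_device_poll_channels(&dev->common);
}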
/**
 * @brief initializes a descriptor queue
 */
errval_t descq_create(struct descq** q,
                      size_t slots,
                      char* name,
                      bool exp,
                      bool notifications,
                      uint8_t role,
                      uint64_t *queue_id,
                      struct descq_func_pointer* f)
{
    DESCQ_DEBUG("create start\n");
    errval_t err;
    struct descq* tmp;
    struct capref rx;
    struct capref tx;

    // Init basic struct fields
    tmp = malloc(sizeof(struct descq));
    assert(tmp != NULL);
    tmp->name = strdup(name);
    assert(tmp->name != NULL);

    if (exp) { // exporting
        struct descq_endpoint_state* state =
            malloc(sizeof(struct descq_endpoint_state));
        assert(state != NULL);
        state->name = strdup(name);
        assert(state->name);

        state->f.notify  = f->notify;
        state->f.dereg   = f->dereg;
        state->f.reg     = f->reg;
        state->f.create  = f->create;
        state->f.destroy = f->destroy;
        state->f.control = f->control;

        err = descq_export(state, export_cb, connect_cb,
                           get_default_waitset(), IDC_BIND_FLAGS_DEFAULT);
        if (err_is_fail(err)) {
            goto cleanup1;
        }

        while (!state->exp_done) {
            event_dispatch(get_default_waitset());
        }
    } else {
        tmp->f.notify  = f->notify;
        tmp->f.dereg   = f->dereg;
        tmp->f.reg     = f->reg;
        tmp->f.create  = f->create;
        tmp->f.destroy = f->destroy;
        tmp->f.control = f->control;

        size_t bytes;
        err = frame_alloc(&rx, DESCQ_ALIGNMENT*slots, &bytes);
        if (err_is_fail(err)) {
            goto cleanup1;
        }
        assert(bytes >= DESCQ_ALIGNMENT*slots);

        err = frame_alloc(&tx, DESCQ_ALIGNMENT*slots, &bytes);
        if (err_is_fail(err)) {
            goto cleanup2;
        }
        assert(bytes >= DESCQ_ALIGNMENT*slots);

        err = vspace_map_one_frame_attr((void**) &(tmp->rx_descs),
                                        slots*DESCQ_ALIGNMENT, rx,
                                        VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            goto cleanup3;
        }

        err = vspace_map_one_frame_attr((void**) &(tmp->tx_descs),
                                        slots*DESCQ_ALIGNMENT, tx,
                                        VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            goto cleanup4;
        }

        memset(tmp->tx_descs, 0, slots*DESCQ_ALIGNMENT);
        memset(tmp->rx_descs, 0, slots*DESCQ_ALIGNMENT);

        tmp->bound_done = false;
        iref_t iref;

        err = nameservice_blocking_lookup(name, &iref);
        if (err_is_fail(err)) {
            goto cleanup5;
        }

        err = descq_bind(iref, bind_cb, tmp, get_default_waitset(),
                         IDC_BIND_FLAGS_DEFAULT);
        if (err_is_fail(err)) {
            goto cleanup5;
        }

        while (!tmp->bound_done) {
            event_dispatch(get_default_waitset());
        }

        tmp->local_bind = tmp->binding->local_binding != NULL;

        errval_t err2;
        err = tmp->binding->rpc_tx_vtbl.create_queue(tmp->binding, slots,
                                                     rx, tx, notifications,
                                                     role, &err2, queue_id);
        if (err_is_fail(err) || err_is_fail(err2)) {
            err = err_is_fail(err) ? err : err2;
            goto cleanup5;
        }

        // the first slot of each ring holds the sequence-number ack header
        tmp->tx_seq_ack = (void*)tmp->tx_descs;
        tmp->rx_seq_ack = (void*)tmp->rx_descs;
        tmp->tx_seq_ack->value = 0;
        tmp->rx_seq_ack->value = 0;
        tmp->tx_descs++;
        tmp->rx_descs++;
        tmp->slots = slots-1;
        tmp->rx_seq = 1;
        tmp->tx_seq = 1;

        devq_init(&tmp->q, false);

        tmp->q.f.enq = descq_enqueue;
        tmp->q.f.deq = descq_dequeue;
        tmp->q.f.notify = descq_notify;
        tmp->q.f.reg = descq_register;
        tmp->q.f.dereg = descq_deregister;
        tmp->q.f.ctrl = descq_control;

        tmp->notifications = notifications;

        notificator_init(&tmp->notificator, tmp,
                         descq_can_read, descq_can_write);
        err = waitset_chan_register(get_default_waitset(),
                                    &tmp->notificator.ready_to_read,
                                    MKCLOSURE(mp_notify, tmp));
        assert(err_is_ok(err));
    }

    *q = tmp;

    DESCQ_DEBUG("create end %p\n", *q);
    return SYS_ERR_OK;

cleanup5:
    // unmap the tx ring first, then fall through to the rx ring
    vspace_unmap(tmp->tx_descs);
cleanup4:
    vspace_unmap(tmp->rx_descs);
cleanup3:
    cap_destroy(tx);
cleanup2:
    cap_destroy(rx);
cleanup1:
    free(tmp->name);
    free(tmp);

    return err;
}
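/*
 * Hedged usage sketch (not from the source): creating the client
 * (non-exporting) side of a descriptor queue. The queue name
 * "mydriver_q" and the slot count are illustrative placeholders; the
 * callbacks in `f` must be filled in by the caller.
 */
static errval_t make_client_queue(struct descq **q, uint64_t *qid,
                                  struct descq_func_pointer *f)
{
    // 64 slots, client side (exp = false), with notifications enabled,
    // default role 0; blocks until the binding and creation RPC complete
    return descq_create(q, 64, "mydriver_q", false, true, 0, qid, f);
}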
/// Map in the frame caps for a module into our vspace, return their location
errval_t spawn_map_module(struct mem_region *module, size_t *retsize,
                          lvaddr_t *retaddr, genpaddr_t *retpaddr)
{
    assert(module != NULL);
    assert(module->mr_type == RegionType_Module);

    errval_t err;

    size_t size = module->mrmod_size;

    void *base;
    struct memobj *memobj;
    struct vregion *vregion;

    err = vspace_map_anon_attr(&base, &memobj, &vregion, size, &size,
                               VREGION_FLAGS_READ);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    struct capref frame = {
        .cnode = cnode_module,
        .slot  = module->mrmod_slot,
    };

    if (retpaddr != NULL) {
        *retpaddr = module->mr_base;
    }

    if (retsize != NULL) {
        *retsize = size;
    }

    if (retaddr != NULL) {
        *retaddr = (lvaddr_t)base;
    }

    size_t offset = 0;
    while (size > 0) {
        assert((size & BASE_PAGE_MASK) == 0);

        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));

        err = memobj->f.fill(memobj, offset, frame, 1UL << id.bits);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }

        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }

        frame.slot++;
        size   -= (1UL << id.bits);
        offset += (1UL << id.bits);
    }

    return SYS_ERR_OK;
}

errval_t spawn_unmap_module(lvaddr_t mapped_addr)
{
    return vspace_unmap((void *)mapped_addr);
}

/// Returns a raw pointer to the module's string area
const char *multiboot_module_rawstring(struct mem_region *region)
{
    if (multiboot_strings == NULL) {
        errval_t err;
        /* Map in multiboot module strings area */
        struct capref mmstrings_cap = {
            .cnode = cnode_module,
            .slot  = 0
        };
        err = vspace_map_one_frame_attr((void**)&multiboot_strings,
                                        BASE_PAGE_SIZE, mmstrings_cap,
                                        VREGION_FLAGS_READ, NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "vspace_map failed");
            return NULL;
        }
#if 0
        printf("Mapped multiboot_strings at %p\n", multiboot_strings);
        for (int i = 0; i < 256; i++) {
            if ((i & 15) == 0) printf("%04x  ", i);
            printf("%02x ", multiboot_strings[i] & 0xff);
            if ((i & 15) == 15) printf("\n");
        }
#endif
    }

    if (region == NULL || region->mr_type != RegionType_Module) {
        return NULL;
    }

    return multiboot_strings + region->mrmod_data;
}

errval_t multiboot_cleanup_mapping(void)
{
    errval_t err = vspace_unmap(multiboot_strings);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "multiboot_cleanup_mapping: vspace_unmap() failed\n");
        return err_push(err, LIB_ERR_VSPACE_REMOVE_REGION);
    }
    multiboot_strings = NULL;
    return SYS_ERR_OK;
}
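/*
 * Hedged usage sketch (not from the source): mapping a multiboot module
 * to peek at its ELF header, then unmapping it again. inspect_module()
 * is illustrative and assumes the module is a 64-bit ELF image.
 */
static errval_t inspect_module(struct mem_region *module)
{
    size_t size;
    lvaddr_t vaddr;

    errval_t err = spawn_map_module(module, &size, &vaddr, NULL);
    if (err_is_fail(err)) {
        return err;
    }

    // the module is now readable at vaddr for `size` bytes
    struct Elf64_Ehdr *ehdr = (struct Elf64_Ehdr *)vaddr;
    printf("module %s: entry point 0x%" PRIx64 "\n",
           multiboot_module_rawstring(module), ehdr->e_entry);

    return spawn_unmap_module(vaddr);
}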