/* Some more generic routines for helping with mapping */

/*
 * Duplicate the cap to a frame and map the duplicate into the vspace.
 *
 * The caller keeps ownership of `page`; a fresh cslot is allocated for the
 * copy so the original cap is untouched.  On any failure every resource
 * acquired so far is released and NULL is returned.
 *
 * @param vka       allocator used for the cslot of the cap copy
 * @param vspace    vspace to map the duplicate frame into
 * @param page      cap to the frame to duplicate
 * @param size_bits size of the frame in bits (log2 bytes)
 * @return          virtual address of the new mapping, or NULL on error
 */
void *sel4utils_dup_and_map(vka_t *vka, vspace_t *vspace, seL4_CPtr page, size_t size_bits)
{
    int error;
    cspacepath_t page_path;
    cspacepath_t copy_path;
    void *mapping;

    /* First need to copy the cap.
     * NOTE: the original text here was mojibake ("©_path"); restored the
     * intended "&copy_path" address-of expressions throughout. */
    error = vka_cspace_alloc_path(vka, &copy_path);
    if (error != seL4_NoError) {
        return NULL;
    }
    vka_cspace_make_path(vka, page, &page_path);
    error = vka_cnode_copy(&copy_path, &page_path, seL4_AllRights);
    if (error != seL4_NoError) {
        /* only the empty slot was allocated; just give it back */
        vka_cspace_free(vka, copy_path.capPtr);
        return NULL;
    }

    /* Now map it in */
    mapping = vspace_map_pages(vspace, &copy_path.capPtr, NULL, seL4_AllRights, 1, size_bits, 1);
    if (!mapping) {
        /* delete the cap copy before releasing its slot */
        vka_cnode_delete(&copy_path);
        vka_cspace_free(vka, copy_path.capPtr);
        return NULL;
    }
    return mapping;
}
/*
 * Identity-map a window of physical RAM into the VM's vspace using 2 MiB
 * frames.  Mapping proceeds upwards from paddr_start until the platform can
 * no longer provide a frame cap; note we therefore do not necessarily map
 * the entirety of RAM, nor do we stop exactly at paddr_end.
 */
static void map_unity_ram(vm_t* vm)
{
    /* Dimensions of physical memory that we'll use. Note that we do not map
     * the entirety of RAM. */
    static const uintptr_t paddr_start = RAM_BASE;
    static const uintptr_t paddr_end = 0x60000000;

    unsigned int frame_bits = 21; /* 2 MiB frames */
    reservation_t reservation;
    int error;

    reservation = vspace_reserve_range_at(&vm->vm_vspace, (void*)paddr_start,
                                          paddr_end - paddr_start, seL4_AllRights, 1);
    assert(reservation.res);

    for (uintptr_t addr = paddr_start;; addr += BIT(frame_bits)) {
        cspacepath_t frame;

        error = vka_cspace_alloc_path(vm->vka, &frame);
        assert(!error);

        /* Stop once no frame cap is available for this physical address. */
        error = simple_get_frame_cap(vm->simple, (void*)addr, frame_bits, &frame);
        if (error) {
            vka_cspace_free(vm->vka, frame.capPtr);
            break;
        }

        error = vspace_map_pages_at_vaddr(&vm->vm_vspace, &frame.capPtr, &frame_bits,
                                          (void*)addr, 1, frame_bits, reservation);
        assert(!error);
    }
}
/*
 * Undo a sel4utils_dup_and_map: unmap the duplicate frame and destroy the
 * cap copy that backed it, returning its cslot to the allocator.
 *
 * @param vka       allocator that owns the cslot of the cap copy
 * @param vspace    vspace the duplicate was mapped into
 * @param mapping   virtual address returned by sel4utils_dup_and_map
 * @param size_bits size of the frame in bits (log2 bytes)
 */
void sel4utils_unmap_dup(vka_t *vka, vspace_t *vspace, void *mapping, size_t size_bits)
{
    /* Grab a copy of the cap */
    seL4_CPtr copy = vspace_get_cap(vspace, mapping);
    cspacepath_t copy_path;
    assert(copy);

    /* now free the mapping; VSPACE_PRESERVE keeps the cap itself alive so
     * we can delete it explicitly below */
    vspace_unmap_pages(vspace, mapping, 1, size_bits, VSPACE_PRESERVE);

    /* delete and free the cap.
     * NOTE: the original text here was mojibake ("©_path"); restored the
     * intended "&copy_path" address-of expressions. */
    vka_cspace_make_path(vka, copy, &copy_path);
    vka_cnode_delete(&copy_path);
    vka_cspace_free(vka, copy);
}
/*
 * Unmap `num_pages` pages of size (1 << size_bits) bytes starting at vaddr.
 *
 * If `vka` is non-NULL (or VSPACE_FREE, which substitutes the vspace's own
 * allocator) the frame caps are deleted, their cslots freed, and — when a
 * cookie is recorded for the page — the backing untyped is returned too.
 * Pages inside an existing reservation are marked reserved again; pages
 * outside any reservation have their entries cleared.
 *
 * @param vspace    vspace to unmap from
 * @param vaddr     first virtual address to unmap
 * @param num_pages number of pages to unmap
 * @param size_bits size of each page in bits (log2 bytes)
 * @param vka       NULL to keep caps, VSPACE_FREE or an allocator to free them
 */
void sel4utils_unmap_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits, vka_t *vka)
{
    uintptr_t v = (uintptr_t) vaddr;
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    sel4utils_res_t *reserve = find_reserve(data, v);

    if (!sel4_valid_size_bits(size_bits)) {
        ZF_LOGE("Invalid size_bits %zu", size_bits);
        return;
    }

    if (vka == VSPACE_FREE) {
        vka = data->vka;
    }

    /* size_t loop index: num_pages is size_t, so `int i` would be a
     * signed/unsigned comparison (and truncate on huge counts) */
    for (size_t i = 0; i < num_pages; i++) {
        seL4_CPtr cap = get_cap(data->top_level, v);

        /* unmap */
        if (cap != 0) {
            int error = seL4_ARCH_Page_Unmap(cap);
            if (error != seL4_NoError) {
                ZF_LOGE("Failed to unmap page at vaddr %p", vaddr);
            }
        }

        /* only destroy a cap that actually exists; the original code would
         * delete/free slot 0 when no cap was recorded for this page */
        if (vka && cap != 0) {
            cspacepath_t path;
            vka_cspace_make_path(vka, cap, &path);
            vka_cnode_delete(&path);
            vka_cspace_free(vka, cap);
            if (sel4utils_get_cookie(vspace, vaddr)) {
                vka_utspace_free(vka, kobject_get_type(KOBJECT_FRAME, size_bits), size_bits, sel4utils_get_cookie(vspace, vaddr));
            }
        }

        if (reserve == NULL) {
            clear_entries(vspace, v, size_bits);
        } else {
            reserve_entries(vspace, v, size_bits);
        }
        assert(get_cap(data->top_level, v) != cap);
        assert(get_cookie(data->top_level, v) == 0);

        /* widen before shifting: a plain (1 << size_bits) is a signed-int
         * shift and undefined for size_bits >= 31 */
        v += (uintptr_t)1 << size_bits;
        vaddr = (void *) v;
    }
}
/* Binds and IRQ to an endpoint */ static seL4_CPtr irq_bind(irq_t irq, seL4_CPtr notification_cap, int idx, vka_t* vka, simple_t *simple) { seL4_CPtr irq_cap, bnotification_cap; cspacepath_t irq_path, notification_path, bnotification_path; seL4_CapData_t badge; int err; /* Create an IRQ cap */ err = vka_cspace_alloc(vka, &irq_cap); if (err != 0) { ZF_LOGE("Failed to allocate cslot for irq\n"); return seL4_CapNull; } vka_cspace_make_path(vka, irq_cap, &irq_path); err = simple_get_IRQ_control(simple, irq, irq_path); if (err != seL4_NoError) { ZF_LOGE("Failed to get cap to irq_number %d\n", irq); vka_cspace_free(vka, irq_cap); return seL4_CapNull; } /* Badge the provided endpoint. The bit position of the badge tells us the array * index of the associated IRQ data. */ err = vka_cspace_alloc(vka, &bnotification_cap); if (err != 0) { ZF_LOGE("Failed to allocate cslot for irq\n"); vka_cspace_free(vka, irq_cap); return seL4_CapNull; } vka_cspace_make_path(vka, notification_cap, ¬ification_path); vka_cspace_make_path(vka, bnotification_cap, &bnotification_path); badge = seL4_CapData_Badge_new(BIT(idx)); err = vka_cnode_mint(&bnotification_path, ¬ification_path, seL4_AllRights, badge); if (err != seL4_NoError) { ZF_LOGE("Failed to badge IRQ notification endpoint\n"); vka_cspace_free(vka, irq_cap); vka_cspace_free(vka, bnotification_cap); return seL4_CapNull; } /* bind the IRQ cap to our badged endpoint */ err = seL4_IRQHandler_SetNotification(irq_cap, bnotification_cap); if (err != seL4_NoError) { ZF_LOGE("Failed to bind IRQ handler to notification\n"); vka_cspace_free(vka, irq_cap); vka_cspace_free(vka, bnotification_cap); return seL4_CapNull; } /* Finally ACK any pending IRQ and enable the IRQ */ seL4_IRQHandler_Ack(irq_cap); DIRQSERVER("Registered IRQ %d with badge 0x%lx\n", irq, BIT(idx)); return irq_cap; }
/*
 * Identity-map guest RAM into the VM's vspace in 2 MiB chunks, walking up
 * from RAM_START until the platform stops handing out frame caps (so the
 * loop's only exit is a failed simple_get_frame_cap).
 */
static void map_unity_ram(vm_t* vm)
{
    unsigned int chunk_bits = 21; /* 2 MiB frames */
    reservation_t ram_res;
    int error;

    ram_res = vspace_reserve_range_at(&vm->vm_vspace, (void*)RAM_START,
                                      RAM_END - RAM_START, seL4_AllRights, 1);
    assert(ram_res.res);

    for (uintptr_t paddr = RAM_START;; paddr += BIT(chunk_bits)) {
        cspacepath_t frame_path;

        error = vka_cspace_alloc_path(vm->vka, &frame_path);
        assert(!error);

        error = simple_get_frame_cap(vm->simple, (void*)paddr, chunk_bits, &frame_path);
        if (error) {
            /* no more frames — release the unused slot and finish */
            vka_cspace_free(vm->vka, frame_path.capPtr);
            break;
        }

        error = vspace_map_pages_at_vaddr(&vm->vm_vspace, &frame_path.capPtr, &chunk_bits,
                                          (void*)paddr, 1, chunk_bits, ram_res);
        assert(!error);
    }
}
/*
 * vka cspace_free hook for the proxy allocator: slot bookkeeping is
 * delegated entirely to the wrapped regular allocator.
 */
void proxy_vka_cspace_free(void *data, seL4_CPtr slot)
{
    proxy_vka_t *proxy = data;
    vka_cspace_free(&proxy->regular_vka, slot);
}