END_TEST

/*
 * PD1000: create a fresh PD, create a memsection inside it, then write
 * and read back the first 16 bytes of the backing memory before
 * deleting the PD.
 *
 * Fix: dropped the redundant "i = 0;" (the for-loop re-initializes i)
 * and the redundant (void *)base cast -- p already holds the pointer.
 */
START_TEST(PD1000)
{
    pd_ref_t newpd = 0;
    memsection_ref_t memsec = 0;
    uintptr_t base;
    int i;
    char *p;

    newpd = pd_create();
    fail_if(newpd == 0, "NULL pd returned");

    memsec = pd_create_memsection(newpd, MEM_SIZE, &base);
    fail_if(memsec == 0, "NULL memsection returned");

    /* Fill the first 16 bytes and verify each one. */
    p = (char *)base;
    memset(p, '%', 0x10);
    for (i = 0; i < 0x10; i++) {
        fail_if(*p != '%', "error data");
        p++;
    }

    pd_delete(newpd);
}
END_TEST

/*
 * PD1401: repeat of PD1400 with EXECUTE_IID or'd into each reference.
 *
 * EXECUTE_IID is chosen because this will result in an unaligned
 * pointer on most architectures.
 */
START_TEST(PD1401)
{
    uintptr_t cap_list;
    uintptr_t cap_base;
    pd_ref_t pd = pd_create();

    fail_if(pd == 0, "NULL pd returned");
    pd |= EXECUTE_IID;

    cap_list = pd_create_memsection(pd, MEM_SIZE, &cap_base);
    fail_if(cap_list == 0, "NULL memsection returned");
    cap_list |= EXECUTE_IID;

    pd_add_clist(pd, cap_list);
    pd_release_clist(pd, cap_list, 0);
    pd_delete(pd);
}
END_TEST #endif START_TEST(PD0700) { memsection_ref_t memsec = 0; uintptr_t base; int i = 0; char *p; memsec = pd_create_memsection(pd_myself(), MEM_SIZE, &base); fail_if(memsec == 0, "NULL memsection returned"); if (memsec == 0) { return; } p = (char *)base; memset(p, '%', 0x10); for (i = 0; i < 0x10; i++) { fail_if(*p != '%', "error data"); p++; } memsection_delete(memsec); }
/*
 * Walk the boot-time object list and, for every object that lives in
 * virtual memory (VMS$IOF_VIRT), create an internal memsection in the
 * kernel PD (freevms_pd) covering it, so the object is tracked by the
 * memsection machinery.  Panics if a memsection cannot be created or
 * if the freshly created section cannot be found again through the
 * object table.
 */
void sys$populate_init_objects(struct vms$meminfo *mem_info,
        vms$pointer pagesize)
{
    extern struct pd freevms_pd;
    struct initial_obj *obj;
    struct memsection *ret;
    unsigned int i;
    vms$pointer base;

    obj = mem_info->objects;

    for(i = 0; i < mem_info->num_objects; i++, obj++)
    {
        if (obj->flags & VMS$IOF_VIRT)
        {
            /* Align the object start down to a page boundary. */
            base = sys$page_round_down(obj->base, pagesize);
            /* NOTE(review): the size passed is obj->end - base.  If
             * obj->end is an inclusive last address (the convention
             * used in sys$bootstrap) this is one byte short -- confirm
             * which convention struct initial_obj uses. */
            ret = sys$pd_create_memsection(&freevms_pd, obj->end - base,
                    base, VMS$MEM_INTERNAL, pagesize);
            PANIC(ret == NULL);

            // Check if it is correctly in object table
            PANIC(sys$objtable_lookup((void*) obj->base) == 0);
        }
    }

    return;
}
END_TEST

/*
 * PD1100: install a memsection as the callback buffer of our own PD,
 * delete the memsection, then clear the callback again.
 */
START_TEST(PD1100)
{
    uintptr_t cb_base;
    uintptr_t cb;

    cb = pd_create_memsection(pd_myself(), MEM_SIZE, &cb_base);
    fail_if(cb == 0, "NULL memsection returned");

    pd_set_callback(pd_myself(), cb);
    memsection_delete(cb);
    pd_set_callback(pd_myself(), 0);
}
END_TEST /* * Attach a memsection twice to the one PD. */ START_TEST(PD2000) { pd_ref_t newpd = 0; uintptr_t base; newpd = pd_create(); fail_if(newpd == 0, "NULL pd returned"); if (newpd != 0) { memsection_ref_t memsec = 0; memsec = pd_create_memsection(pd_myself(), MEM_SIZE, &base); fail_if(memsec == 0, "NULL memsection returned"); if (memsec != 0) { int res; res = pd_attach(newpd, memsec, L4_FullyAccessible); fail_if(res != 0, "Attaching memsection failed."); res = pd_attach(newpd, memsec, L4_FullyAccessible); fail_if(res != 0, "Attaching memsection failed."); if (res == 0) { pd_detach(newpd, memsec); pd_detach(newpd, memsec); } /* Attach again, to see if any data structures are * damaged. */ res = pd_attach(newpd, memsec, L4_FullyAccessible); fail_if(res != 0, "Attaching memsection failed."); if (res == 0) { pd_detach(newpd, memsec); } memsection_delete(memsec); } pd_delete(newpd); } }
END_TEST

/*
 * PD1300: use a memsection in our own PD as a capability list: add it,
 * release it, then delete the backing memsection.
 */
START_TEST(PD1300)
{
    uintptr_t list_base;
    uintptr_t list;

    list = pd_create_memsection(pd_myself(), MEM_SIZE, &list_base);
    fail_if(list == 0, "NULL clist returned");

    pd_add_clist(pd_myself(), list);
    pd_release_clist(pd_myself(), list, 0);
    memsection_delete(list);
}
END_TEST

/*
 * PD1200: register a callback buffer on a freshly created PD, then
 * delete the PD with the callback still installed.
 */
START_TEST(PD1200)
{
    uintptr_t cb_base;
    uintptr_t cb;
    pd_ref_t pd = pd_create();

    fail_if(pd == 0, "NULL pd returned");

    cb = pd_create_memsection(pd, MEM_SIZE, &cb_base);
    fail_if(cb == 0, "NULL memsection returned");

    pd_set_callback(pd, cb);
    pd_delete(pd);
}
END_TEST

/*
 * PD1400: create a PD plus a clist memsection inside it, add and
 * release the clist, then delete the PD.
 */
START_TEST(PD1400)
{
    uintptr_t cl_base;
    uintptr_t cl;
    pd_ref_t pd = pd_create();

    fail_if(pd == 0, "NULL pd returned");

    cl = pd_create_memsection(pd, MEM_SIZE, &cl_base);
    fail_if(cl == 0, "NULL memsection returned");

    pd_add_clist(pd, cl);
    pd_release_clist(pd, cl, 0);
    pd_delete(pd);
}
/*
 * Create a thread in the given PD with the given priority, give it a
 * freshly created stack memsection of SIZE bytes attached read/write,
 * and start it at `start` with the stack pointer at the top of that
 * memsection.
 *
 * If `wait` is non-zero, block until the new thread sends to us, then
 * serialise on serialise_mutex (the other thread unlocks it just
 * before faulting) and yield ~1000 times so the iguana server can
 * print the page-fault details.
 *
 * Returns the thread reference on success, -1 on any failure.
 *
 * NOTE(review): on the stack-creation and stack-attach failure paths
 * the already-created thread (and, for the attach path, the stack
 * memsection) is not cleaned up before returning -1 -- confirm whether
 * the caller or PD teardown reclaims them.
 */
thread_ref_t create_thread(pd_ref_t pd, void (*start)(void), int priority,
        int wait)
{
    uintptr_t stack_base;
    int r;
    memsection_ref_t stack;
    thread_ref_t thread;
    L4_ThreadId_t tid;

    thread = pd_create_thread_with_priority(pd, priority, &tid);
    if (thread == 0 || thread == -1) {
        printf("Failed to create thread.\n");
        return -1;
    }

    stack = pd_create_memsection(pd, SIZE, &stack_base);
    if (stack == 0 || stack == -1) {
        printf("Failed to create stack.\n");
        return -1;
    }

    r = pd_attach(pd, stack, L4_ReadWriteOnly);
    if (r != 0) {
        printf("Failed to attach stack.\n");
        return -1;
    }

    /* Stack grows down: start with SP at the top of the memsection. */
    thread_start(thread, (uintptr_t)start, stack_base + SIZE);

    /*
     * Block on the mutex if waiting. The other thread will unlock just
     * before faulting.
     */
    if (wait) {
        L4_Receive(thread_l4tid(thread));
        okl4_libmutex_lock(serialise_mutex);
        okl4_libmutex_unlock(serialise_mutex);
        /*
         * Wait a little longer for iguana server to print out the
         * page fault details.
         */
        for (r = 0; r < 1000; r++)
            L4_Yield();
    }

    return thread;
}
END_TEST /* Feed null PDs and memsections to pd_attach() and expect errors. */ START_TEST(PD1700) { pd_ref_t newpd = 0; uintptr_t base; newpd = pd_create(); fail_if(newpd == 0, "NULL pd returned"); if (newpd != 0) { memsection_ref_t memsec = 0; memsec = pd_create_memsection(pd_myself(), MEM_SIZE, &base); fail_if(memsec == 0, "NULL memsection returned"); if (memsec != 0) { int res; res = pd_attach(newpd, 0, L4_FullyAccessible); fail_if(res == 0, "Attaching null memsection succeeded"); res = pd_attach(0, memsec, L4_FullyAccessible); fail_if(res == 0, "Attaching memsection to null PD succeeded"); res = pd_attach(0, 0, L4_FullyAccessible); fail_if(res == 0, "Attaching null memsection to null PD succeeded"); res = pd_attach(newpd, memsec, L4_FullyAccessible); fail_if(res != 0, "Attaching PD failed."); if (res == 0) pd_detach(newpd, memsec); memsection_delete(memsec); } pd_delete(newpd); } }
static void worker_a(void *ignore) { ARCH_THREAD_INIT memsection_ref_t m; uintptr_t base; int i; L4_KDB_SetThreadName(thread_l4tid(thread_myself()), "worker_a"); L4_Yield(); for (i = 0; i < 10000; i++) { m = pd_create_memsection(pd_myself(), 1UL << 31, &base); assert(m == 0); } worker_a_done = 1; L4_WaitForever(); }
END_TEST /* * Repeat of the above test with a changed IID in the reference. * * EXECUTE_IID is chosen because this will result in an unaligned * pointer on most architectures. */ START_TEST(PD1601) { pd_ref_t newpd = 0; uintptr_t base; newpd = pd_create(); fail_if(newpd == 0, "NULL pd returned"); if (newpd != 0) { memsection_ref_t memsec = 0; newpd |= EXECUTE_IID; memsec = pd_create_memsection(pd_myself(), MEM_SIZE, &base); fail_if(memsec == 0, "NULL memsection returned"); if (memsec != 0) { int res; memsec |= EXECUTE_IID; res = pd_attach(newpd, memsec, L4_FullyAccessible); fail_if(res != 0, "Attaching memsection failed."); if (res == 0) pd_detach(newpd, memsec); memsection_delete(memsec); } pd_delete(newpd); } }
END_TEST

/*
 * PD0900: create and delete a memsection, then re-create one at the
 * address just vacated using the _fixed_user variant.
 */
START_TEST(PD0900)
{
    uintptr_t vbase;
    memsection_ref_t section;

    section = pd_create_memsection(pd_myself(), MEM_SIZE, &vbase);
    fail_if(section == 0, "NULL memsection returned");
    if (section == 0) {
        return;
    }
    memsection_delete(section);

    section = pd_create_memsection_fixed_user(pd_myself(), MEM_SIZE, vbase);
    fail_if(section == 0, "NULL memsection returned");
    memsection_delete(section);
}
END_TEST /* * Attach a memsection to 2 different pd */ START_TEST(PD2100) { pd_ref_t newpd1 = 0; pd_ref_t newpd2 = 0; memsection_ref_t memsec = 0; uintptr_t base; int res; newpd1 = pd_create(); fail_if(newpd1 == 0, "NULL pd returned"); newpd2 = pd_create(); fail_if(newpd2 == 0, "NULL pd returned"); if ((newpd1 != 0) && (newpd2 != 0)) { memsec = pd_create_memsection(pd_myself(), MEM_SIZE, &base); fail_if(memsec == 0, "NULL memsection returned"); if (memsec != 0) { res = pd_attach(newpd1, memsec, L4_FullyAccessible); fail_if(res != 0, "Attaching memsection failed."); res = pd_attach(newpd2, memsec, L4_FullyAccessible); fail_if(res != 0, "Attaching memsection failed."); if (res == 0) { pd_detach(newpd1, memsec); pd_detach(newpd2, memsec); } memsection_delete(memsec); } pd_delete(newpd1); pd_delete(newpd2); } }
END_TEST #endif /* * Try and attach a memsection to a deleted PD. */ START_TEST(PD1900) { pd_ref_t newpd = 0; uintptr_t base; newpd = pd_create(); fail_if(newpd == 0, "NULL pd returned"); if (newpd != 0) { memsection_ref_t memsec = 0; memsec = pd_create_memsection(pd_myself(), MEM_SIZE, &base); fail_if(memsec == 0, "NULL memsection returned"); if (memsec != 0) { int res; pd_delete(newpd); res = pd_attach(newpd, memsec, L4_FullyAccessible); fail_if(res == 0, "Attaching memsection to deleted PD succeeded."); if (res == 0) pd_detach(newpd, memsec); memsection_delete(memsec); } else pd_delete(newpd); } }
END_TEST #if 0 /* Disabled becuase Iguana does not track deleted memsections */ /* * Try and attach a deleted memsection to a PD. */ START_TEST(PD1800) { pd_ref_t newpd = 0; uintptr_t base; newpd = pd_create(); fail_if(newpd == 0, "NULL pd returned"); if (newpd != 0) { memsection_ref_t memsec = 0; memsec = pd_create_memsection(pd_myself(), MEM_SIZE, &base); fail_if(memsec == 0, "NULL memsection returned"); if (memsec != 0) { int res; memsection_delete(memsec); res = pd_attach(newpd, memsec, L4_FullyAccessible); fail_if(res == 0, "Attaching deleted memsection succeeded."); if (res == 0) pd_detach(newpd, memsec); } pd_delete(newpd); } }
static void worker_b(void *ignore) { ARCH_THREAD_INIT memsection_ref_t m; uintptr_t base; int i; char *p; L4_KDB_SetThreadName(thread_l4tid(thread_myself()), "worker_b"); L4_Yield(); for (i = 0; i < 1000; i++) { m = pd_create_memsection(pd_myself(), MEM_SIZE, &base); assert(m != 0); p = (char *)base; memset(p, '%', 0x10); memsection_delete(m); } worker_b_done = 1; L4_WaitForever(); }
END_TEST

/*
 * PD1101: repeat of PD1100 with EXECUTE_IID or'd into the references.
 *
 * EXECUTE_IID is chosen because this will result in an unaligned
 * pointer on most architectures.
 */
START_TEST(PD1101)
{
    uintptr_t cb_base;
    uintptr_t cb;
    pd_ref_t self = pd_myself();

    self |= EXECUTE_IID;
    cb = pd_create_memsection(self, MEM_SIZE, &cb_base);
    fail_if(cb == 0, "NULL memsection returned");
    cb |= EXECUTE_IID;

    pd_set_callback(self, cb);
    memsection_delete(cb);
    pd_set_callback(self, 0);
}
END_TEST /* * Simple use case: Create a memsection and attached it to another * pd. */ START_TEST(PD1600) { pd_ref_t newpd = 0; uintptr_t base; newpd = pd_create(); fail_if(newpd == 0, "NULL pd returned"); if (newpd != 0) { memsection_ref_t memsec = 0; memsec = pd_create_memsection(pd_myself(), MEM_SIZE, &base); fail_if(memsec == 0, "NULL memsection returned"); if (memsec != 0) { int res; res = pd_attach(newpd, memsec, L4_FullyAccessible); fail_if(res != 0, "Attaching memsection failed."); if (res == 0) pd_detach(newpd, memsec); memsection_delete(memsec); } pd_delete(newpd); } }
/*
 * Bootstrap the physical (pm_alloc) and virtual (vm_alloc) fpage
 * allocators from the boot-time memory map, then create the kernel
 * heap and initialize the kernel allocator with it.
 *
 * Fixes:
 *  - The PANIC after the virtual-memory bootstrap loop compared the
 *    loop index against num_regions instead of num_vm_regions, so a
 *    failed VM bootstrap could go undetected (or a successful one
 *    could panic) whenever the two counts differ.
 *  - The VM loop's size test used (end - (base + 1)), which demands
 *    two bytes more than the inclusive range size; it now uses
 *    ((end - base) + 1) like the identical physical-memory loop below.
 */
void sys$bootstrap(struct vms$meminfo *mem_info, vms$pointer pagesize)
{
    struct memsection *heap;
    unsigned int i;
    vms$pointer base;
    vms$pointer end;

    notice(SYSBOOT_I_SYSBOOT "reserving memory for preloaded objects\n");

    // Initialization
    pm_alloc.internal.base = 0;
    pm_alloc.internal.end = 0;
    pm_alloc.internal.active = 0;
    vm_alloc.internal.base = 0;
    vm_alloc.internal.end = 0;
    vm_alloc.internal.active = 0;

    for(i = 0; i <= MAX_FPAGE_ORDER; i++)
    {
        TAILQ_INIT(&vm_alloc.flist[i]);
        TAILQ_INIT(&pm_alloc.flist[i]);
    }

    // Bootimage objects are removed from free virtual memory.
    for(i = 0; i < mem_info->num_objects; i++)
    {
        if (mem_info->objects[i].flags & VMS$IOF_VIRT)
        {
            notice(MEM_I_ALLOC "allocating $%016lX - $%016lX\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
            sys$remove_virtmem(mem_info, mem_info->objects[i].base,
                    mem_info->objects[i].end, pagesize);
        }
    }

    // Free up some virtual memory to bootstrap the fpage allocator.
    for(i = 0; i < mem_info->num_vm_regions; i++)
    {
        base = sys$page_round_up(mem_info->vm_regions[i].base, pagesize);
        end = sys$page_round_down(mem_info->vm_regions[i].end + 1,
                pagesize) - 1;

        // Inclusive range [base, end] has size (end - base) + 1; we
        // need at least two pages, as in the physical loop below.
        if (((end - base) + 1) >= (2 * pagesize))
        {
            notice(MEM_I_FALLOC "bootstrapping Fpage allocator at virtual "
                    "addresses\n");
            notice(MEM_I_FALLOC "$%016lX - $%016lX\n", base, end);
            sys$fpage_free_internal(&vm_alloc, base, end);
            mem_info->vm_regions[i].end = mem_info->vm_regions[i].base;
            break;
        }
    }

    // If the loop ran to completion no suitable VM region was found.
    PANIC(i >= mem_info->num_vm_regions);

    // We need to make sure the first chunk of physical memory we free
    // is at least 2 * pagesize to bootstrap the slab allocators for
    // memsections and the fpage lists.
    for(i = 0; i < mem_info->num_regions; i++)
    {
        base = sys$page_round_up(mem_info->regions[i].base, pagesize);
        end = sys$page_round_down(mem_info->regions[i].end + 1,
                pagesize) - 1;

        if (((end - base) + 1) >= (2 * pagesize))
        {
            notice(MEM_I_SALLOC "bootstrapping Slab allocator at physical "
                    "addresses\n");
            notice(MEM_I_SALLOC "$%016lX - $%016lX\n", base, end);
            sys$fpage_free_chunk(&pm_alloc, base, end);
            mem_info->regions[i].end = mem_info->regions[i].base;
            break;
        }
    }

    PANIC(i >= mem_info->num_regions);

    // Base and end may not be aligned, but we need them to be aligned. If
    // the area is less than a page than we should not add it to the free list.
    for(i = 0; i < mem_info->num_regions; i++)
    {
        if (mem_info->regions[i].base == mem_info->regions[i].end)
        {
            continue;
        }

        base = sys$page_round_up(mem_info->regions[i].base, pagesize);
        end = sys$page_round_down(mem_info->regions[i].end + 1,
                pagesize) - 1;

        if (base < end)
        {
            notice(MEM_I_FREE "freeing region $%016lX - $%016lX\n",
                    base, end);
            sys$fpage_free_chunk(&pm_alloc, base, end);
        }
    }

    sys$fpage_clear_internal(&vm_alloc);

    // Initialize VM allocator
    for(i = 0; i < mem_info->num_vm_regions; i++)
    {
        if (mem_info->vm_regions[i].base < mem_info->vm_regions[i].end)
        {
            notice(MEM_I_VALLOC "adding $%016lX - $%016lX to VM allocator\n",
                    mem_info->vm_regions[i].base,
                    mem_info->vm_regions[i].end);
            sys$fpage_free_chunk(&vm_alloc, mem_info->vm_regions[i].base,
                    mem_info->vm_regions[i].end);
        }
    }

    // Setup the kernel heap
    heap = sys$pd_create_memsection((struct pd *) NULL, VMS$HEAP_SIZE, 0,
            VMS$MEM_NORMAL | VMS$MEM_USER, pagesize);
    PANIC(heap == NULL, notice(SYS_F_HEAP "cannot allocate kernel heap\n"));

    sys$alloc_init(heap->base, heap->end);

    return;
}
END_TEST

/*
 * This test triggers the bug #1702 in function refcmp() which causes the security check to fail.
 * It can not be run on MIPS32 as MIPS32 address space ends at 0x80000000.
 */
#if !(defined(L4_ARCH_MIPS) && defined(L4_32BIT))

#define PD2200_MEM_SIZE 0x33000

START_TEST(PD2200)
{
    pd_ref_t newpd = 0;
    uintptr_t clist, clist_base;
    uintptr_t clist2, clist_base2;
    uintptr_t clist3, clist4, clist5;
    uintptr_t clist_ref;
    char *p;
    int i, res;

    /* Memsection references at/above this value are the ones that
     * exercise the refcmp() comparison in bug #1702. */
    clist_ref = 0x80000000;

    newpd = pd_create();
    fail_if(newpd == 0, "NULL pd returned");

    /*
     * The following virtual address is needed to trigger the bug #1702.
     * If no such virtual address is available on the platform, the test
     * is skipped.
     */
    clist_base = 0xb0000000;

    /* Carve out three consecutive fixed-address memsections followed by
     * a fourth one; the first three are deleted below to shape the
     * allocator state needed for the repro. */
    clist3 = pd_create_memsection_fixed_user(newpd, PD2200_MEM_SIZE,
            clist_base);
    if (clist3 == 0) {
        printf("This test does not apply to this platform\n");
        return;
    }
    res = pd_attach(newpd, (memsection_ref_t)clist3, L4_FullyAccessible);
    fail_if(res != 0, "Attaching memsection failed.");

    clist_base += PD2200_MEM_SIZE;
    clist4 = pd_create_memsection_fixed_user(newpd, PD2200_MEM_SIZE,
            clist_base);
    if (clist4 == 0) {
        printf("This test does not apply to this platform\n");
        return;
    }
    res = pd_attach(newpd, (memsection_ref_t)clist4, L4_FullyAccessible);
    fail_if(res != 0, "Attaching memsection failed.");

    clist_base += PD2200_MEM_SIZE;
    clist5 = pd_create_memsection_fixed_user(newpd, PD2200_MEM_SIZE,
            clist_base);
    if (clist5 == 0) {
        printf("This test does not apply to this platform\n");
        return;
    }
    res = pd_attach(newpd, (memsection_ref_t)clist5, L4_FullyAccessible);
    fail_if(res != 0, "Attaching memsection failed.");

    clist_base += PD2200_MEM_SIZE;
    clist = pd_create_memsection_fixed_user(newpd, MEM_SIZE, clist_base);
    if (clist == 0) {
        printf("This test does not apply to this platform\n");
        return;
    }
    res = pd_attach(newpd, (memsection_ref_t)clist, L4_FullyAccessible);
    fail_if(res != 0, "Attaching memsection failed.");
    //printf("First memsection ref: %lx base: %lx\n", clist, clist_base);

    /* Free the three shaping memsections so later allocations land in
     * the intended address range. */
    memsection_delete(clist3);
    memsection_delete(clist4);
    memsection_delete(clist5);
    //L4_KDB_Enter("PD2200");

    /* Keep allocating until a memsection reference >= clist_ref
     * (0x80000000) appears, or give up after 100 tries.  The
     * intermediate memsections are deliberately left attached to newpd
     * and are only reclaimed by pd_delete() at the end.
     * NOTE(review): if pd_create_memsection() fails mid-loop, clist2 is
     * 0 and pd_attach() is called with a null ref before the loop
     * condition terminates -- confirm that attach rejects this
     * harmlessly. */
    i = 0;
    do {
        clist2 = pd_create_memsection(newpd, MEM_SIZE, &clist_base2);
        res = pd_attach(newpd, (memsection_ref_t)clist2,
                L4_FullyAccessible);
        fail_if(res != 0, "Attaching memsection failed.");
        //printf("%lx, ", clist2);
        if (++i == 100) {
            clist2 = 0;
        }
    } while ((clist2 < clist_ref) && (clist2 != 0));

    if (clist && clist2) {
        //printf("\nSecond memsection ref: %lx, base: %lx\n", clist2, clist_base2);
        /* Touch the high-reference memsection; with bug #1702 present
         * the security check would reject this access. */
        p = (char *)clist_base2;
        memset((void *)clist_base2, '%', 0x10);
        for (i = 0; i < 0x10; i++) {
            fail_if(*p != '%', "error data");
            p++;
        }
        memsection_delete(clist2);
    } else {
        printf("Test skipped: Could not create requested memsection\n");
    }

    memsection_delete(clist);
    pd_delete(newpd);
}