/* * Returns the page table index to be evicted and the static variable * ensures it remembers the last search index for the next eviction * Uses the second chance page replacement algorithm */ static int evictPage(void) { static int evictIndex = 0; int copyIndex = evictIndex; int counter = 0; while(1) { counter++; //Check that a being updated and pinned entry is not evicted if((L4_ThreadNo(page_table[evictIndex].tid) == L4_ThreadNo(L4_nilthread)) || (page_table[evictIndex].referenced == 0 && page_table[evictIndex].being_updated == 0 && page_table[evictIndex].pinned == 0)) return evictIndex; else if(page_table[evictIndex].being_updated == 0 && page_table[evictIndex].pinned == 0){ page_table[evictIndex].referenced = 0; L4_UnmapFpage(page_table[evictIndex].tid,page_table[evictIndex].pageNo); } else if (page_table[evictIndex].being_updated == 1) { copyIndex = (copyIndex + 1) % numPTE; } //Check if we have done one full cycle of page table search without an eviction owing to //updation then bail else we go into an infinite loop if(counter == numPTE) { if(copyIndex == evictIndex) { return -1; } numPTE = 0; copyIndex = evictIndex; } evictIndex = (evictIndex + 1) % numPTE; } }
/*
 * Tears down all paging state owned by tid_killed: unmaps its pages,
 * returns the backing frames, and releases its swap slots back to the
 * free list.
 */
void unmap_process(L4_ThreadId_t tid_killed)
{
    int i;

    /* Release every page table entry belonging to the thread. */
    for (i = 0; i < numPTE; i++) {
        if (L4_ThreadNo(page_table[i].tid) != L4_ThreadNo(tid_killed))
            continue;
        L4_UnmapFpage(page_table[i].tid, page_table[i].pageNo);
        frame_free(new_low + i * PAGESIZE);
        page_table[i].tid = L4_nilthread;
        page_table[i].referenced = 0;
        page_table[i].dirty = 0;
        page_table[i].being_updated = 0;
        page_table[i].error_in_transfer = 0;
        page_table[i].pinned = 0;
    }

    /* Push the thread's swap slots onto the free-slot list. */
    for (i = 0; i < MAX_SWAP_ENTRIES; i++) {
        if (L4_ThreadNo(swap_table[i].tid) != L4_ThreadNo(tid_killed))
            continue;
        swap_table[i].tid = L4_nilthread;
        swap_table[i].next_free = head_free_swap;
        head_free_swap = i;
    }

    /* Remove later: not strictly required. */
    L4_CacheFlushAll();
}
/*
 * Linear scan of the page table for the entry matching (tid, fpage).
 * Returns the index, or -1 when no entry matches.  For a table of a
 * few thousand entries the linear search performs acceptably.
 */
static int isInPage(L4_ThreadId_t tid, L4_Fpage_t fpage)
{
    int idx;

    for (idx = 0; idx < numPTE; idx++) {
        int sameThread = (L4_ThreadNo(page_table[idx].tid) == L4_ThreadNo(tid));
        int samePage = (page_table[idx].pageNo.X.b == fpage.X.b);
        if (sameThread && samePage) {
            //printf("old page found in page at %d (%lx, %d)\n", idx, L4_ThreadNo(page_table[idx].tid), page_table[idx].pageNo.X.b);
            return idx;
        }
    }
    return -1;
}
/*
 * Unmaps every occupied page table entry (milestone 2 helper).
 * Free slots (tid == nilthread) are left untouched.
 */
void unmap_all()
{
    int i;

    for (i = 0; i < numPTE; i++) {
        if (L4_ThreadNo(page_table[i].tid) == L4_ThreadNo(L4_nilthread))
            continue; /* empty slot, nothing mapped */
        L4_UnmapFpage(page_table[i].tid, page_table[i].pageNo);
    }
}
/*
 * Linear scan of the swap table for the entry matching (tid, fpage).
 * Returns the slot index, or -1 when the page is not swapped out.
 */
static int isInSwap(L4_ThreadId_t tid, L4_Fpage_t fpage)
{
    int idx;

    for (idx = 0; idx < MAX_SWAP_ENTRIES; idx++) {
        if (L4_ThreadNo(swap_table[idx].tid) != L4_ThreadNo(tid))
            continue;
        if (swap_table[idx].pageNo.X.b == fpage.X.b) {
            //printf("old page found in swap at %d (%lx, %d)\n", idx, L4_ThreadNo(swap_table[idx].tid), swap_table[idx].pageNo.X.b);
            return idx;
        }
    }
    return -1;
}
/*
 * Loads the entire ELF image into physical frames and maps fpages for
 * the image's virtual address range into the new process.
 * Returns 0 on success, -1 on failure; on any failure all partial state
 * is rolled back via unmap_process().
 */
int load_code_segment_virtual(char *elfFile, L4_ThreadId_t new_tid)
{
    uint32_t min[2];
    uint32_t max[2];
    /* NOTE(review): elf_getMemoryBounds writes 64-bit bounds; min[1]/max[1]
     * are treated as the significant 32-bit halves here — confirm the
     * endianness assumption against elf_getMemoryBounds. */
    elf_getMemoryBounds(elfFile, 0, (uint64_t *) min, (uint64_t *) max);

    /* Reserve and map one frame per page of [min, max]. */
    L4_Word_t lower_address = ((L4_Word_t) min[1] / PAGESIZE) * PAGESIZE;
    L4_Word_t upper_address = ((L4_Word_t) max[1] / PAGESIZE) * PAGESIZE;

    while (lower_address <= upper_address) {
        L4_Word_t frame = frame_alloc();
        if (!frame) {
            /* Out of frames: roll back everything done so far. */
            unmap_process(new_tid);
            return -1;
        }
        L4_Fpage_t targetpage = L4_FpageLog2(lower_address, 12);
        lower_address += PAGESIZE;
        L4_Set_Rights(&targetpage, L4_FullyAccessible);
        L4_PhysDesc_t phys = L4_PhysDesc(frame, L4_DefaultMemory);
        /* Map the frame to the root task for the copy-in, but record the
         * page table entry under new_tid; the mapping is switched over to
         * the new thread once ELF loading is done. */
        if (L4_MapFpage(L4_Myself(), targetpage, phys)) {
            page_table[(frame - new_low) / PAGESIZE].tid = new_tid;
            page_table[(frame - new_low) / PAGESIZE].pinned = 1;
            page_table[(frame - new_low) / PAGESIZE].pageNo = targetpage;
        } else {
            /* BUG FIX: the original kept looping after a failed mapping and
             * eventually returned 0 (success); fail the load instead. */
            unmap_process(new_tid);
            return -1;
        }
    }

    /* Pages are mapped into the root task, so elf_loadFile can write the
     * image through the virtual addresses. */
    if (elf_loadFile(elfFile, 0) == 1) {
        /* ELF loaded: re-map each frame from the root task to new_tid. */
        for (int i = 0; i < numPTE; i++) {
            if (L4_ThreadNo(new_tid) == L4_ThreadNo(page_table[i].tid)) {
                L4_UnmapFpage(L4_Myself(), page_table[i].pageNo);
                L4_PhysDesc_t phys =
                    L4_PhysDesc(new_low + i * PAGESIZE, L4_DefaultMemory);
                if (!L4_MapFpage(new_tid, page_table[i].pageNo, phys)) {
                    unmap_process(new_tid);
                    return -1;
                }
            }
        }
    } else {
        /* BUG FIX: a failed ELF load previously fell through and returned 0
         * (success); report the failure to the caller. */
        unmap_process(new_tid);
        return -1;
    }

    /* Remove later. */
    L4_CacheFlushAll();
    return 0;
}
/* * This initialise data structures for managing threads. * * Called from: main (1) */ void thread_init(void) { /* Work out the thread ids that are available to us */ int r; min_threadno = L4_ThreadNo(IGUANA_SERVER) + 2; /* +1 is the callback * thread */ max_threadno = kernel_max_root_caps - 1; thread_list = rfl_new(); assert(thread_list != NULL); #ifdef CONFIG_TEST_FEW_THREADS /* * This tests iguana behaviour when the pool of available thread ids is * exhausted */ max_threadno = from + 30; #endif r = rfl_insert_range(thread_list, min_threadno, max_threadno); assert(r == 0); l4tid_to_thread = hash_init(THREAD_PD_HASHSIZE); }
void malloc_panic(void) { printf("testbench: %s called in %lu:%lu, returns %p, %p, %p!\n", __func__, L4_ThreadNo(L4_Myself()), L4_Version(L4_Myself()), __builtin_return_address(0), __builtin_return_address(1), __builtin_return_address(2)); abort(); }
static void handle_fault(L4_Word_t faddr, L4_Word_t fip, L4_MapItem_t *map) { struct drop_param *param = get_ctx(); L4_MsgTag_t tag = muidl_get_tag(); int rwx = tag.X.label & 0x000f; #if 0 L4_ThreadId_t from = muidl_get_sender(); diag("drop_pager: pf in %lu:%lu at %#lx, ip %#lx", L4_ThreadNo(from), L4_Version(from), faddr, fip); #endif param->log_top = (param->log_top + 1) % LOG_SIZE; param->log[param->log_top] = L4_FpageLog2(faddr, 12); L4_Set_Rights(¶m->log[param->log_top], rwx); int dpos = param->log_top - param->keep; if(dpos < 0) dpos += LOG_SIZE; assert(dpos >= 0 && dpos < LOG_SIZE); L4_Fpage_t drop = param->log[dpos]; if(!L4_IsNilFpage(drop) && L4_Address(drop) != (faddr & ~PAGE_MASK)) { #if 0 diag("flushing %#lx:%#lx (dpos %d)", L4_Address(drop), L4_Size(drop), dpos); #endif L4_Set_Rights(&drop, L4_FullyAccessible); L4_FlushFpage(drop); } /* pass it on. */ L4_LoadBR(0, L4_CompleteAddressSpace.raw); L4_LoadMR(0, (L4_MsgTag_t){ .X.label = 0xffe0 | rwx, .X.u = 2 }.raw);
/*
 * Creates a new thread inside an existing address space.
 * Picks a free global thread number (-> the new global tid) and a free
 * per-task slot (which fixes the UTCB location inside the task's UTCB
 * area), allocates the bookkeeping record from the thread slab, and
 * activates the thread via L4_ThreadControl with the space's pager.
 * Returns the new tid, or L4_nilthread on any failure.
 * All bookkeeping is done under thrlock.
 */
L4_ThreadId_t thread_new(AddrSpace_t *space)
{
    assert (space != NULL);
    L4_Word_t tno;
    L4_ThreadId_t tid;
    L4_ThreadId_t space_spec;
    L4_Word_t utcb_location;
    slab_t *sb;
    list_t *li;
    thread_t *this;

    mutex_lock(&thrlock);
    /* Global thread number -> new global tid, version 1. */
    tno = threadno_find_free(bitmap, MAX_TASKS);
    if (!tno) {
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }
    tid = L4_GlobalId(tno, 1);
    utcb_location = UTCB_AREA_LOCATION;
    space_spec = space->tid;
    /* Per-task slot: reused as the UTCB offset within the task's area. */
    tno = threadno_find_free(space->threads, MAX_THREADS_PER_TASK);
    if (!tno) {
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }
    utcb_location += tno * UTCB_SIZE;
    sb = slab_alloc(&thrpool);
    if (!sb) {
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }
    /* Create and activate the kernel thread in the target space. */
    if (FALSE == (L4_ThreadControl(tid, space_spec, tid, space->pager,
                                   (void *) utcb_location))) {
        slab_free(&thrpool, sb);
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }
    li = LIST_TYPE(sb->data);
    this = (thread_t *) li->data;
    list_push(&thread_list, li);
    this->tid = tid;
    this->space = space;
    this->index = tno;                 /* per-task slot index */
    this->creation = L4_SystemClock();
    /* Mark both the global and the per-task thread numbers as used. */
    threadno_alloc(bitmap, L4_ThreadNo(tid));
    threadno_alloc(space->threads, tno);
    mutex_unlock(&thrlock);
    return tid;
}
/*
 * Sets up the thread subsystem: initialises the lock, the thread slab
 * pool, the global thread list and the thread-number bitmap, then
 * reserves every thread number up to and including our own so they are
 * never handed out.
 */
void thread_init(L4_ThreadId_t tid)
{
    L4_Word_t self_no, n;

    mutex_init(&thrlock);
    mutex_lock(&thrlock);

    slab_init(LIST_SIZE(sizeof(thread_t)), THREAD_SLAB_BUFFER_COUNT,
              &thrpool, thread_slab_buffer, kmalloc,
              THREAD_SLAB_BUFFER_COUNT);
    thread_list = NULL;
    memset(bitmap, 0, MAX_TASKS / 8);

    /* Thread numbers at or below ours are already in use. */
    self_no = L4_ThreadNo(tid);
    for (n = 0; n <= self_no; n++)
        threadno_alloc(bitmap, n);

    mutex_unlock(&thrlock);
}
/*
 * Destroys the thread `tid': deletes the kernel thread, releases its
 * bookkeeping and both thread numbers, and — if the thread owned its
 * address space — either transfers ownership to another thread in the
 * same space or destroys the space when it was the last one.
 * Returns TRUE on success, FALSE if the thread is unknown or the kernel
 * delete fails.  All bookkeeping is done under thrlock.
 */
int thread_destroy(L4_ThreadId_t tid)
{
    list_t *li;
    AddrSpace_t *as;
    L4_Word_t tno;

    mutex_lock(&thrlock);
    li = list_find(thread_list, &tid, sizeof(L4_ThreadId_t));
    if (li == NULL) {
        mutex_unlock(&thrlock);
        return FALSE;
    }
    /* Delete the kernel thread (nil space spec, (void *)-1 UTCB). */
    if (FALSE == L4_ThreadControl(tid, L4_nilthread, L4_nilthread,
                                  L4_nilthread, (void *) -1)) {
        mutex_unlock(&thrlock);
        return FALSE;
    }
    as = THREAD_TYPE(li->data)->space;
    tno = THREAD_TYPE(li->data)->index;
    list_remove(&thread_list, li);
    slab_free(&thrpool, SLAB_FROM_DATA(li));
    /* Free both the global and the per-task thread numbers. */
    threadno_free(bitmap, L4_ThreadNo(tid));
    threadno_free(as->threads, tno);
    /* If this thread identified the space, find another thread in the
     * same space to take over, or tear the space down if none remains. */
    if (tid.raw == as->tid.raw) {
        for (li = thread_list; li; li = li->next) {
            if (THREAD_TYPE(li->data)->space == as)
                break;
        }
        if (li == NULL) {
            // task destroy notification should go here
            address_space_destroy(as);
        } else {
            as->tid = THREAD_TYPE(li->data)->tid;
        }
    }
    mutex_unlock(&thrlock);
    // thread destroy notification should go here
    return TRUE;
}
void __assert_failure( const char *condition, const char *file, unsigned int line, const char *function) { if(in_test()) { printf("Bail out! %s:%d assert failure `%s'\n", file, line, condition); exit_on_fail(); } else { printf("testbench %lu:%lu %s(`%s', `%s', %u, `%s')\n", L4_ThreadNo(L4_Myself()), L4_Version(L4_Myself()), __func__, condition, file, line, function); abort(); for(;;) { asm volatile("int $1"); } } }
/*
 * Releases the bookkeeping for a finished thread: removes both hash
 * mappings (by tid and by handle) and returns its thread number to the
 * free pool.  Unknown threads are silently ignored.
 */
void thread_free(L4_ThreadId_t thread)
{
    struct thread *dead = hash_lookup(l4tid_to_thread, thread.raw);

    if (dead == NULL)
        return;

    /* Removed both mappings */
    hash_remove(l4tid_to_thread, dead->id.raw);
    hash_remove(l4tid_to_thread, dead->handle.raw);

    /* Add thread back to free pool */
    int r = rfl_free(thread_list, L4_ThreadNo(dead->id));
    assert(r == RFL_SUCCESS);
}
/*
 * Creates a thread in the current address space running `func' on a
 * freshly allocated stack.  `idx' offsets both the new thread number
 * and its UTCB slot relative to the current thread.
 * Returns the new tid, or L4_nilthread on failure (`desc' is used only
 * in the error message).
 */
static L4_ThreadId_t create_local_thread(char *desc,
    L4_KernelInterfacePage_t *kip, int idx, threadfunc_t func,
    L4_Word_t stack_size)
{
    L4_Word_t utcb_size = L4_UtcbSize(kip);
    /* Our own UTCB base, aligned down to a UTCB-size boundary; the new
     * thread's UTCB lives `idx' slots above it. */
    L4_Word_t my_utcb = L4_MyLocalId().raw;
    my_utcb = (my_utcb & ~(utcb_size - 1));

    L4_ThreadId_t tid = L4_GlobalId(L4_ThreadNo(L4_Myself()) + idx, 1);
    L4_Word_t utcb_location = my_utcb + idx * utcb_size;
    if (FALSE == L4_ThreadControl(tid, L4_Myself(), L4_Myself(), L4_Pager(),
            (void *) utcb_location))
    {
        printf("panic: can't execute %s: error code %d\n",
            desc, (int) L4_ErrorCode());
        return L4_nilthread;
    }

    void *stack = kmalloc(stack_size);
    if (stack == NULL) {
        /* BUG FIX: the original never checked kmalloc and would start the
         * thread on a NULL stack.  Delete the half-created thread and
         * report failure instead. */
        L4_ThreadControl(tid, L4_nilthread, L4_nilthread, L4_nilthread,
            (void *) -1);
        return L4_nilthread;
    }
    /* Leave a 32-byte scratch area at the top of the stack. */
    L4_Start_SpIp(tid, (L4_Word_t) stack + stack_size - 32, (L4_Word_t) func);

    return tid;
}
/* poke/peek thread. obeys POKE, PEEK, and QUIT. */ static void poke_peek_fn(void *param_ptr) { #if 0 diag("%s: started as %lu:%lu. pager is %#lx", __func__, L4_ThreadNo(L4_MyGlobalId()), L4_Version(L4_MyGlobalId()), L4_Pager()); #endif for(;;) { L4_ThreadId_t from; L4_MsgTag_t tag = L4_Wait(&from); for(;;) { if(L4_IpcFailed(tag)) break; if(tag.X.label == QUIT_LABEL) { // diag("%s: quitting", __func__); return; } else if(tag.X.label == PEEK_LABEL) { L4_Word_t addr; L4_StoreMR(1, &addr); L4_LoadMR(0, (L4_MsgTag_t){ .X.u = 1 }.raw); L4_LoadMR(1, *(uint8_t *)addr); } else if(tag.X.label == POKE_LABEL) {
/*
 * Destroys every thread belonging to `space', then the space itself.
 * Walks the global thread list, deleting each matching kernel thread
 * and releasing its bookkeeping and global thread number.
 * NOTE(review): the per-task slot (tno) is read but never freed — the
 * whole space is destroyed right after, so that appears intentional.
 */
void task_destroy(AddrSpace_t *space)
{
    list_t *li, *next;
    AddrSpace_t *as;
    L4_Word_t tno;
    L4_ThreadId_t tid;

    assert(space != NULL);
    mutex_lock(&thrlock);
    next = NULL;
    /* `next' is captured before removal so the walk survives list_remove. */
    for (li = thread_list; li; li = next) {
        as = THREAD_TYPE(li->data)->space;
        next = li->next;
        if (as != space)
            continue;
        tno = THREAD_TYPE(li->data)->index;
        tid = THREAD_TYPE(li->data)->tid;
        /* hope this succeeds */
        L4_ThreadControl(tid, L4_nilthread, L4_nilthread, L4_nilthread,
                         (void *) -1);
        list_remove(&thread_list, li);
        slab_free(&thrpool, SLAB_FROM_DATA(li));
        threadno_free(bitmap, L4_ThreadNo(tid));
        // thread destroy notification should go here
    }
    address_space_destroy(space);
    mutex_unlock(&thrlock);
    // task destroy notification should go here
    return;
}
int main(void) { /* initialise communication */ ttyout_init(); // invalid access: //*(char *) 0x30000000 = 123; //*(char *) NULL = 123; pt_test(); L4_ThreadId_t myid; /*assert( ((int)&stack_space) > 0x2000000); stack_space[0] = 'a'; stack_space[1025] = 'b'; //L4_Word_t utcb_location = (L4_Word_t) L4_GetUtcbBase(); //printf("utcb is at: %ud", utcb_location); printf("stack addr: %X\n", (int)&stack_space);*/ myid = L4_Myself(); do { printf("task:\tHello world, I'm\t0x%lx!\n", L4_ThreadNo(myid)); sos_write("123456789012345\n", 0, 16, NULL); sos_write("1234567890123456789\n", 0, 20, NULL); sos_write("abcdefghijklmnop\n", 0, 17, NULL); sos_write("abc\n", 0, 4, NULL); thread_block(); // sleep(1); // Implement this as a syscall } while(1); return 0; }
/*
 * Returns the global tid of helper thread i: the main thread's number
 * offset by i + 16, with version 1.
 */
static L4_ThreadId_t thread_offset(int i)
{
    return L4_GlobalId(L4_ThreadNo(main_thread) + (i + 16), 1);
}
/*
 * Benchmark setup for the IPC interrupt test: creates a pager thread,
 * an interrupt handler thread and (optionally, #if SPINNER) a lowest-
 * priority spinner thread inside KBENCH_SPACE, and registers the
 * handler for the PMU IRQ.  args[0] carries the iteration count.
 */
static void ipc_irq_setup(struct new_bench_test *test, int args[])
{
    int r;
    L4_Word_t utcb;
    L4_Word_t utcb_size;
    // L4_Word_t dummy;

    num_iterations = args[0];
    handler_space = L4_nilspace;

    /* We need a maximum of two threads per task */
    utcb_size = L4_GetUtcbSize();

#ifdef NO_UTCB_RELOCATE
    utcb = ~0UL;   /* let the kernel place the UTCB */
#else
    utcb =(L4_Word_t)L4_GetUtcbBase() + utcb_size;
#endif

    /* Create pager */
    master_tid = KBENCH_SERVER;
    pager_tid.raw = KBENCH_SERVER.raw + 1;
    handler_tid.raw = KBENCH_SERVER.raw + 2;
    interrupt = PMU_IRQ;

#if SPINNER
    spinner_tid = L4_GlobalId (L4_ThreadNo (master_tid) + 3, 2);
#endif

    r = L4_ThreadControl (pager_tid, KBENCH_SPACE, master_tid, master_tid,
                          master_tid, 0, (void*)utcb);
    /* On error code 2 the tid is presumably stale from a previous run:
     * delete the old thread and retry the creation — TODO confirm the
     * meaning of error code 2 against the kernel reference. */
    if (r == 0 && (L4_ErrorCode() == 2)) {
        r = L4_ThreadControl (pager_tid, L4_nilspace, L4_nilthread,
                              L4_nilthread, L4_nilthread, 0, (void *) 0);
        assert(r == 1);
        r = L4_ThreadControl (pager_tid, KBENCH_SPACE, master_tid,
                              master_tid, master_tid, 0, (void*)utcb);
        assert(r == 1);
    }
    L4_KDB_SetThreadName(pager_tid, "pager");
    //L4_Schedule(pager_tid, -1, -1, 1, -1, -1, 0, &dummy, &dummy);
    L4_Set_Priority(pager_tid, 254);   /* pager runs near top priority */
    L4_Start_SpIp (pager_tid,
                   (L4_Word_t) pager_stack + sizeof(pager_stack) - 32,
                   START_ADDR (pager));
    L4_Receive(pager_tid);   /* wait for the pager's startup message */

#ifdef NO_UTCB_RELOCATE
    utcb = ~0UL;
#else
    utcb += utcb_size;   /* next UTCB slot, for the handler thread */
#endif
    r = L4_ThreadControl(handler_tid, KBENCH_SPACE, master_tid, pager_tid,
                         pager_tid, 0, (void *) utcb);
    assert(r == 1);
    L4_KDB_SetThreadName(handler_tid, "handler");
    L4_Set_Priority(handler_tid, 100);

    // Startup notification, start handler thread
    //printf("register irq %ld, to %lx\n", interrupt, handler_tid.raw);
    /* NOTE(review): the control word packs interrupt-control flags —
     * confirm the bit layout against the L4_InterruptControl spec. */
    L4_Word_t control = 0 | (0 << 6) | (31<<27);
    L4_LoadMR(0, interrupt);
    r = L4_InterruptControl(handler_tid, control);
    if (r == 0) {
        printf("Cannot register interrupt %lu\n", interrupt);
    }
    L4_Start_SpIp (handler_tid,
                   (L4_Word_t) handler_stack + sizeof(handler_stack) - 32,
                   START_ADDR(handler));
    L4_Receive(handler_tid);   /* wait for the handler's startup message */

#if SPINNER
    //Create spinner thread
#ifdef NO_UTCB_RELOCATE
    utcb = ~0UL;
#else
    utcb += utcb_size;
#endif
    r = L4_ThreadControl (spinner_tid, KBENCH_SPACE, master_tid, pager_tid,
                          pager_tid, 0, (void*) utcb);
    if (r == 0)
        printf("create spinner failed %ld\n", L4_ErrorCode());
    assert(r == 1);
    L4_KDB_SetThreadName(spinner_tid, "spinner");
    //L4_Schedule(spinner_tid, -1, -1, 1, -1, -1, 0, &dummy, &dummy);
    //Set priority to the lowest.
    L4_Set_Priority(spinner_tid, 1);
    L4_Start_SpIp (spinner_tid,
                   (L4_Word_t) spinner_stack + sizeof(spinner_stack) - 32,
                   START_ADDR (spinner));
#endif
}
/*
 * Creates a new task: a fresh address space plus its first thread,
 * whose global tid doubles as the space identifier (space_spec == tid).
 * The thread is first created with ourselves as temporary pager, the
 * address-space record is attached, and the thread is then switched to
 * its own space with the caller-supplied pager.
 * Returns the new space, or NULL on failure.  Bookkeeping under thrlock.
 */
AddrSpace_t *task_new(L4_ThreadId_t pager)
{
    L4_Word_t tno;
    L4_ThreadId_t tid;
    L4_ThreadId_t space_spec;
    L4_Word_t utcb_location;
    AddrSpace_t *space = NULL;
    slab_t *sb;
    list_t *li;
    thread_t *this;

    mutex_lock(&thrlock);
    tno = threadno_find_free(bitmap, MAX_TASKS);
    if (!tno) {
        mutex_unlock(&thrlock);
        return NULL;
    }
    tid = L4_GlobalId(tno, 1);
    utcb_location = UTCB_AREA_LOCATION;
    /* space_spec == tid requests creation of a brand-new address space. */
    space_spec = tid;
    sb = slab_alloc(&thrpool);
    if (!sb) {
        mutex_unlock(&thrlock);
        return NULL;
    }
    if (FALSE == (L4_ThreadControl(tid, space_spec, L4_Myself(),
                                   L4_nilthread, (void *) utcb_location))) {
        slab_free(&thrpool, sb);
        mutex_unlock(&thrlock);
        return NULL;
    }
    space = address_space_new(tid, pager);
    if (!space) {
        /* Roll back the kernel thread we just created. */
        L4_ThreadControl(tid, L4_nilthread, L4_nilthread, L4_nilthread,
                         (void *) -1);
        slab_free(&thrpool, sb);
        mutex_unlock(&thrlock);
        return NULL;
    } else {
        /* set self space, and the specified pager
         * FIXME - using myself as the scheduler */
        L4_ThreadControl(tid, tid, L4_Myself(), pager, (void *) -1);
    }
    li = LIST_TYPE(sb->data);
    this = (thread_t *) li->data;
    list_push(&thread_list, li);
    this->tid = tid;
    this->space = space;
    this->index = 0;                   /* first thread takes per-task slot 0 */
    this->creation = L4_SystemClock();
    /* Mark the global thread number and per-task slot 0 as used. */
    threadno_alloc(bitmap, L4_ThreadNo(tid));
    threadno_alloc(space->threads, 0);
    mutex_unlock(&thrlock);
    return space;
}