static void pager (void) { L4_ThreadId_t tid; L4_MsgTag_t tag; L4_Msg_t msg; L4_Send(master_tid); for (;;) { tag = L4_Wait(&tid); for (;;) { L4_Word_t faddr, fip; L4_MsgStore(tag, &msg); if (L4_UntypedWords (tag) != 2 || !L4_IpcSucceeded (tag)) { printf ("Malformed pagefault IPC from %p (tag=%p)\n", (void *) tid.raw, (void *) tag.raw); L4_KDB_Enter ("malformed pf"); break; } faddr = L4_MsgWord(&msg, 0); fip = L4_MsgWord (&msg, 1); L4_MsgClear(&msg); { L4_MapItem_t map; L4_SpaceId_t space; L4_Word_t seg, offset, cache, rwx, size; int r; seg = get_seg(KBENCH_SPACE, faddr, &offset, &cache, &rwx); assert(seg != ~0UL); size = L4_GetMinPageBits(); faddr &= ~((1ul << size)-1); offset &= ~((1ul << size)-1); space.raw = __L4_TCR_SenderSpace(); L4_MapItem_Map(&map, seg, offset, faddr, size, cache, rwx); r = L4_ProcessMapItem(space, map); assert(r == 1); } L4_MsgLoad(&msg); // tag = L4_ReplyWait (tid, &tid); tag = L4_MsgTag(); L4_Set_SendBlock(&tag); L4_Set_ReceiveBlock(&tag); tag = L4_Ipc(tid, L4_anythread, tag, &tid); } } }
static void utimer(void) { L4_KDB_SetThreadName(sos_my_tid(), "utimer"); L4_Accept(L4_UntypedWordsAcceptor); List *entryq; entryq = list_empty(); for (;;) { L4_Yield(); // Walk the timer list L4_Word_t now = L4_KDB_GetTick(); list_delete(entryq, processExpired, &now); // Wait for a new packet either blocking or non-blocking L4_MsgTag_t tag = L4_Niltag; if (list_null(entryq)) L4_Set_ReceiveBlock(&tag); else L4_Clear_ReceiveBlock(&tag); L4_ThreadId_t wait_tid = L4_nilthread; tag = L4_Ipc(L4_nilthread, L4_anythread, tag, &wait_tid); if (!L4_IpcFailed(tag)) { // Received a time out request queue it L4_Msg_t msg; L4_MsgStore(tag, &msg); // Get the message utimer_entry_t *entry = (utimer_entry_t *) L4_MsgWord(&msg, 0); entry->fTid = wait_tid; list_shift(entryq, entry); } else if (3 == L4_ErrorCode()) // Receive error # 1 continue; // no-partner - non-blocking else assert(!"Unhandled IPC error"); } }
static void pager (void) { L4_ThreadId_t tid; L4_MsgTag_t tag; L4_Msg_t msg; int count = 0; for (;;) { tag = L4_Wait(&tid); for (;;) { L4_Word_t faddr, fip; L4_MsgStore(tag, &msg); if (L4_Label(tag) == START_LABEL) { // Startup notification, start ping and pong thread void (*start_addr)(void); void (*pong_start_addr)(void); L4_Word_t *pong_stack_addr = pong_stack; if (pagertimer) { start_addr = ping_thread_pager; } else if (pagertimer_simulated) { start_addr = ping_thread_simulated; } else if (fass_buffer) { start_addr = ping_thread_buffer; } else if (fault_test) { count = 0; start_addr = NULL; } else if (intra_close) { start_addr = ping_thread_close; } else if (intra_open) { start_addr = ping_thread_open; } else if (intra_rpc) { start_addr = ping_thread_rpc_server; } else if (intra_ovh) { start_addr = ping_thread_ovh; } else if (intra_async) { start_addr = ping_thread_async; } else if (intra_async_ovh) { start_addr = ping_thread_async_ovh; } else { start_addr = ping_thread; } if (start_addr != NULL) { /*printf("ping_start_addr: %lx ping_stack_addr: %lx\n", START_ADDR (start_addr), (L4_Word_t) ping_stack);*/ send_startup_ipc (ping_tid, START_ADDR(start_addr), (L4_Word_t) ping_stack + sizeof (ping_stack) - 32); L4_ThreadSwitch(ping_tid); } if (fass_buffer) { pong_start_addr = pong_thread_buffer; pong_stack_addr = pong_stack_fass; } else if (fass) { pong_start_addr = pong_thread_fass; pong_stack_addr = pong_stack_fass; } else if (fault_test) { pong_stack_addr = pong_stack_fass; pong_start_addr = pong_thread_faulter; } else if (intra_close) { pong_start_addr = pong_thread_close; } else if (intra_open) { pong_start_addr = pong_thread_open; } else if (intra_rpc) { pong_start_addr = pong_thread_close; } else if (intra_ovh) { pong_start_addr = pong_thread_ovh; } else if (intra_async) { pong_start_addr = pong_thread_async; } else if (intra_async_ovh) { pong_start_addr = pong_thread_async_ovh; } else { pong_start_addr = pong_thread; } if (!pagertimer) { 
/*printf("pong_start_addr: %lx pong_stack_addr: %lx\n", START_ADDR (pong_start_addr), (L4_Word_t) pong_stack_addr);*/ L4_Set_Priority(ping_tid, 100); L4_Set_Priority(pong_tid, 99); send_startup_ipc (pong_tid, START_ADDR (pong_start_addr), (L4_Word_t) pong_stack_addr + sizeof (ping_stack) - 32); } break; } if (L4_UntypedWords (tag) != 2 || !L4_IpcSucceeded (tag)) { printf ("pingpong: malformed pagefault IPC from %p (tag=%p)\n", (void *) tid.raw, (void *) tag.raw); L4_KDB_Enter ("malformed pf"); break; } faddr = L4_MsgWord(&msg, 0); fip = L4_MsgWord (&msg, 1); L4_MsgClear(&msg); if (fault_test && (faddr == (uintptr_t) fault_area)) { if (count < num_iterations) { count++; } else { /* Tell master that we're finished */ L4_Set_MsgTag (L4_Niltag); L4_Send (master_tid); break; } } else { L4_MapItem_t map; L4_SpaceId_t space; L4_Word_t seg, offset, cache, rwx, size; int r; seg = get_seg(KBENCH_SPACE, faddr, &offset, &cache, &rwx); //if can not find mapping, must be page fault test, //just map any valid address, since fault address is dummy. if (seg == ~0UL) seg = get_seg(KBENCH_SPACE, (L4_Word_t) fault_area, &offset, &cache, &rwx); if (tid.raw == ping_th.raw) space = ping_space; else if (tid.raw == pong_th.raw) { if (pong_space.raw != L4_nilspace.raw) space = pong_space; else //pong_space is not created, only ping_space is used. space = ping_space; } else space = KBENCH_SPACE; size = L4_GetMinPageBits(); faddr &= ~((1ul << size)-1); offset &= ~((1ul << size)-1); L4_MapItem_Map(&map, seg, offset, faddr, size, cache, rwx); r = L4_ProcessMapItem(space, map); assert(r == 1); } L4_MsgLoad(&msg); tag = L4_ReplyWait (tid, &tid); } } }
/*
 * Function invoked by roottask on pagefault.
 *
 * Resolves the faulting address to a physical frame and maps it into
 * the faulting thread's space:
 *  - code-segment virtual addresses must already be in the page table
 *    and are mapped fully accessible;
 *  - heap/stack virtual addresses are looked up in the page table
 *    (swapped in via mapAddress() on a miss), maintaining the
 *    referenced/dirty bits and granting write access only on a write
 *    fault;
 *  - non-virtual addresses are mapped 1:1, writable below new_low and
 *    read-only above it.
 *
 * Returns `send` (set to 1 here) so the caller replies to the faulter.
 */
int pager(L4_ThreadId_t tid, L4_Msg_t *msgP)
{
    send = 1;

    /* Get the faulting address and align it to a page boundary. */
    L4_Word_t addr = L4_MsgWord(msgP, 0);
    L4_Word_t physicalAddress = 0;
    L4_Word_t permission = 0;
    L4_MsgTag_t tag;

    addr = (addr / PAGESIZE) * PAGESIZE;
    tag = L4_MsgMsgTag(msgP);
    /* The low three label bits encode the access type (r/w/x). */
    L4_Word_t access_type = L4_Label(tag) & 0x07;

    /* Construct fpage IPC message (4K page). */
    L4_Fpage_t targetFpage = L4_FpageLog2(addr, 12);

    if (VIRTUAL(addr)) {
        if (addr >= BASE_CODE_SEGMENT_ADDRESS) {
            /* Code segment: must already be resident. */
            int inPage = isInPage(tid, targetFpage);
            if (inPage == -1) {
                /* It should be in the page table, so this should not
                 * happen.  Bail out instead of falling through and
                 * mapping physical page 0 with no rights (the previous
                 * behaviour of this path). */
                printf("Panic !!! Cannot load the code segment\n");
                return send;
            }
            physicalAddress = new_low + inPage * PAGESIZE;
            permission = L4_FullyAccessible;
        } else {
            /* Heap and stack. */
            int inPage = isInPage(tid, targetFpage);
            if (inPage == -1) {
                /* Not resident: check swap.  mapAddress() performs the
                 * mapping itself, so nothing further to do here. */
                inPage = isInSwap(tid, targetFpage);
                mapAddress(tid, targetFpage, inPage);
                return send;
            }
            physicalAddress = new_low + inPage * PAGESIZE;
            targetFpage = page_table[inPage].pageNo;
            page_table[inPage].referenced = 1;
            if (access_type & L4_Writable) {
                /* Write fault: set the dirty bit and grant r/w. */
                page_table[inPage].dirty = 1;
                permission = L4_ReadWriteOnly;
            } else {
                permission = L4_Readable;
            }
        }
    } else {
        /* Non-virtual: map physical addresses 1:1. */
        physicalAddress = addr;
        if (addr < new_low) {
            /* Below the low memory range, i.e. the page table and
             * other addresses below the low range. */
            permission = L4_FullyAccessible;
        } else {
            /* The code segment between new_low and high: read-only. */
            permission = L4_Readable;
        }
    }

    L4_Set_Rights(&targetFpage, permission);
    L4_PhysDesc_t phys = L4_PhysDesc(physicalAddress, L4_DefaultMemory);
    if (!L4_MapFpage(tid, targetFpage, phys)) {
        sos_print_error(L4_ErrorCode());
        printf(" Can't map page at %lx\n", addr);
    }
    return send;
}
/* XXX Note, this fuction does not currently handle faults from * vmalloc/vmaped'd memory. That should probably be in a separate * function anyway. */ int l4_do_page_fault(unsigned long address, long access, struct pt_regs *regs) { struct vm_area_struct * vma; struct mm_struct *mm = current->mm; int fault, si_code = SEGV_MAPERR; siginfo_t info; /* If we're in an interrupt context, or have no user context, we must not take the fault. */ if (!mm) /* || in_interrupt()) */ goto bad_area_nosemaphore; down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* Ok, we have a good vm_area for this memory access, so we can handle it. */ good_area: si_code = SEGV_ACCERR; if (/* LOAD */ access & 0x4) { /* Allow reads even for write/execute-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) goto bad_area; } else if (/* FETCH */ access & 0x1) { if (!(vma->vm_flags & VM_EXEC)) goto bad_area; } else { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; } survive: /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. */ fault = handle_mm_fault(mm, vma, address, access & 0x2); up_read(&mm->mmap_sem); switch (fault) { case VM_FAULT_MINOR: current->min_flt++; break; case VM_FAULT_MAJOR: current->maj_flt++; break; case VM_FAULT_SIGBUS: goto do_sigbus; case VM_FAULT_OOM: goto out_of_memory; #if 0 /* * Well, it's a good idea to have this here, but apparently * handle_mm_fault() can return all sorts of weird stuff, which * makes it unsuitable to put BUG() here. -gl */ default: BUG(); #endif } return 0; /* Something tried to access memory that isn't in our memory map. Fix it, but check if it's kernel or user first. 
*/ bad_area: up_read(&mm->mmap_sem); /* Check if it is at TASK_SIG_BASE */ #ifdef CONFIG_ARCH_ARM /* * Binary patching for NPTL * * XXX ??? Better place this thing? */ if (user_mode(regs) && ((address & PAGE_MASK) == 0xffff0000)) { #if 0 printk("Fault at address 0x%lx pc = 0x%lx, " "need rewrite\n", address, L4_MsgWord(¤t_regs()->msg, 1)); #endif if (address == 0xffff0fe0) { L4_Msg_t msg; unsigned long pc = L4_MsgWord(¤t_regs()->msg, 1); unsigned long lr, fpc; unsigned long instr, r; long offs; if (pc != 0xffff0fe0) goto bad_area_nosemaphore; L4_Copy_regs_to_mrs(task_thread_info(current)->user_tid); L4_StoreMRs(0, 16, &msg.msg[0]); lr = msg.msg[14]; fpc = lr - 4; L4_CacheFlushAll(); instr = get_instr(fpc); if (instr == -1UL) goto bad_area_nosemaphore; if ((instr & 0x0f000000) == 0x0b000000) { offs = instr << 8; offs = offs >> 6; /* ASR */ fpc = (fpc + 8) + offs; instr = get_instr(fpc); if (instr == -1UL) goto bad_area_nosemaphore; if ((instr & 0xffffffff) == 0xe3e00a0f) { /* mvn r0, 0xf000 */ /* * Rewrite to load the * kernel_reserved[0] from the * utcb. * * This requires L4 to cooperate * with the ExReg() syscall. */ /* mov r0, #0xff000000 */ r = set_instr(fpc, 0xe3a004ff); if (r == -1UL) goto bad_area_nosemaphore; fpc += 4; /* ldr r0, [r0, #0xff0] */ r = set_instr(fpc, 0xe5900ff0); if (r == -1UL) goto bad_area_nosemaphore; fpc += 4; /* ldr r0, [r0, #56] */ r = set_instr(fpc, 0xe5900038); if (r == -1UL) goto bad_area_nosemaphore; fpc += 4; /* mov pc, lr */ r = set_instr(fpc, 0xe1a0f00e); if (r == -1UL) goto bad_area_nosemaphore; L4_CacheFlushAll(); msg.msg[0] = current_thread_info()->tp_value; msg.msg[15] = lr; L4_LoadMRs(0, 16, &msg.msg[0]); L4_Copy_mrs_to_regs( task_thread_info(current)->user_tid); L4_MsgPutWord(¤t_regs()->msg, 1, lr); return 0; } } else if (instr == 0xe240f01f) {