int /*? me.interface.name ?*/__run(void) {
    // Make connection to gdb
    seL4_Word delegate_tcb;
    seL4_UserContext regs;
    while (1) {
        seL4_Recv(/*? mem_ep ?*/, &delegate_tcb);
        seL4_TCB_ReadRegisters(delegate_tcb, false, 0,
                               sizeof(seL4_UserContext) / sizeof(seL4_Word), &regs);
        // Check eax is 0 so that we know they were checking memory
        // TODO Add a check on pc to see if they were in the mem check function
        if (regs.eax == 0) {
            // Signal to the delegate the memory is invalid
            regs.eax = 1;
            // Increment past the faulting instruction
            regs.eip += 2;
            // Write registers back
            seL4_TCB_WriteRegisters(delegate_tcb, false, 0,
                                    sizeof(seL4_UserContext) / sizeof(seL4_Word), &regs);
            // Resume the caller
            seL4_MessageInfo_t info = seL4_MessageInfo_new(0, 0, 0, 1);
            seL4_SetMR(0, regs.eip);
            seL4_Reply(info);
        }
    }
}
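/*
 * For context, a minimal sketch of the delegate-side probe that the loop
 * above assumes. The function name and register convention are hypothetical
 * (the real delegate is not shown here): the probe zeroes eax, performs a
 * 2-byte load that may fault, and treats eax == 1 afterwards as "the fault
 * handler above set eax and skipped the load by advancing eip by 2".
 */
static int probe_memory(seL4_Word vaddr) {
    int invalid;
    asm volatile (
        "xor %%eax, %%eax\n\t"   /* eax = 0: tells the handler this is a probe */
        "mov (%%ecx), %%edx\n\t" /* 2-byte load; faults if vaddr is unmapped    */
        "mov %%eax, %0\n\t"      /* eax == 1 here if the handler skipped it     */
        : "=r"(invalid)
        : "c"(vaddr)
        : "eax", "edx", "memory");
    return invalid;              /* nonzero: vaddr was not readable */
}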
void rpc_sv_reply(void* cl) {
    if (rpc_sv_skip_reply(cl)) {
        return;
    }
    seL4_CPtr reply_endpoint = rpc_sv_get_reply_endpoint(cl);
    seL4_MessageInfo_t reply = seL4_MessageInfo_new(0, 0, _rpc_cp, _rpc_mr);
    if (reply_endpoint) {
        /* A reply endpoint was saved for this client: send there. */
        seL4_Send(reply_endpoint, reply);
    } else {
        /* Otherwise use the implicit reply capability from the last Recv. */
        seL4_Reply(reply);
    }
}
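/*
 * Hypothetical usage sketch (the handler name is invented, and it assumes
 * _rpc_cp/_rpc_mr are the writable globals tracking the reply's cap and
 * message-register counts; the real RPC layer may set them differently):
 * a server handles one request, fills the message registers, and lets
 * rpc_sv_reply() pick between the saved endpoint and the implicit reply cap.
 */
void example_handle_request(void* client) {
    seL4_SetMR(0, 0);   /* status word: success */
    _rpc_mr = 1;        /* one message register in the reply */
    _rpc_cp = 0;        /* no capabilities transferred       */
    rpc_sv_reply(client);
}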
static int ep_test_func(seL4_CPtr sync_ep, seL4_CPtr test_ep, volatile seL4_Word *status, seL4_Word arg4) {
    seL4_MessageInfo_t tag = seL4_MessageInfo_new(0, 0, 0, 0);
    seL4_Word sender_badge;
    while (1) {
        seL4_Recv(sync_ep, &sender_badge);
        /* Hit up the test end point */
        seL4_MessageInfo_t reply = seL4_Call(test_ep, tag);
        /* See what the status was */
        *status = !!(seL4_MessageInfo_get_label(reply) != seL4_InvalidCapability);
        /* Reply */
        seL4_Reply(tag);
    }
    return sel4test_get_result();
}
static int ipc_test_helper_0(ipc_test_data_t *data) {
    /* We are a "bouncer" thread. Each time a high priority process actually
     * wants to wait for a low priority process to execute and block, it does a
     * call to us. We are the lowest priority process and therefore will run
     * only after all other higher priority threads are done.
     */
    while (1) {
        seL4_MessageInfo_t tag;
        seL4_Word sender_badge = 0;
        tag = seL4_Recv(data->ep0, &sender_badge);
        data->bounces++;
        seL4_Reply(tag);
    }
    return 0;
}
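/*
 * Caller side of the bounce pattern, for reference (a sketch; the helper
 * name is hypothetical, but data->ep0 is the endpoint from the struct
 * above): a high-priority thread calls into the bouncer endpoint, and
 * because the bouncer runs at the lowest priority, seL4_Call() returns
 * only after every other runnable thread has executed and blocked.
 */
static void wait_for_lower_prio_threads(ipc_test_data_t *data) {
    seL4_MessageInfo_t tag = seL4_MessageInfo_new(0, 0, 0, 0);
    seL4_Call(data->ep0, tag);
}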
int vm_event(vm_t* vm, seL4_MessageInfo_t tag) {
    seL4_Word label;
    seL4_Word length;

    label = seL4_MessageInfo_get_label(tag);
    length = seL4_MessageInfo_get_length(tag);

    switch (label) {
    case SEL4_PFIPC_LABEL: {
        int err;
        fault_t* fault;
        fault = vm->fault;
        err = new_fault(fault);
        assert(!err);
        do {
            err = handle_page_fault(vm, fault);
            if (err) {
                return -1;
            }
        } while (!fault_handled(fault));
    }
    break;

    case SEL4_EXCEPT_IPC_LABEL: {
        int err;
        assert(length == SEL4_EXCEPT_IPC_LENGTH);
        err = handle_syscall(vm, length);
        assert(!err);
        if (!err) {
            seL4_MessageInfo_t reply;
            reply = seL4_MessageInfo_new(0, 0, 0, 0);
            seL4_Reply(reply);
        }
    }
    break;

    case SEL4_USER_EXCEPTION_LABEL: {
        seL4_Word ip;
        int err;
        assert(length == SEL4_USER_EXCEPTION_LENGTH);
        ip = seL4_GetMR(0);
        err = handle_exception(vm, ip);
        assert(!err);
        if (!err) {
            seL4_MessageInfo_t reply;
            reply = seL4_MessageInfo_new(0, 0, 0, 0);
            seL4_Reply(reply);
        }
    }
    break;

    case SEL4_VGIC_MAINTENANCE_LABEL: {
        int idx;
        int err;
        assert(length == SEL4_VGIC_MAINTENANCE_LENGTH);
        idx = seL4_GetMR(EXCEPT_IPC_SYS_MR_R0);
        /* Currently not handling spurious IRQs */
        assert(idx >= 0);
        err = handle_vgic_maintenance(vm, idx);
        assert(!err);
        if (!err) {
            seL4_MessageInfo_t reply;
            reply = seL4_MessageInfo_new(0, 0, 0, 0);
            seL4_Reply(reply);
        }
    }
    break;

    case SEL4_VCPU_FAULT_LABEL: {
        seL4_MessageInfo_t reply;
        seL4_UserContext regs;
        seL4_CPtr tcb;
        uint32_t hsr;
        int err;
        assert(length == SEL4_VCPU_FAULT_LENGTH);
        hsr = seL4_GetMR(EXCEPT_IPC_SYS_MR_R0);
        /* Increment the PC and ignore the fault */
        tcb = vm_get_tcb(vm);
        err = seL4_TCB_ReadRegisters(tcb, false, 0,
                                     sizeof(regs) / sizeof(regs.pc), &regs);
        assert(!err);
        switch (hsr) {
        case HSR_WFI:
        case HSR_WFE:
            /* Skip the WFI/WFE instruction: 2 bytes in Thumb state
             * (CPSR T bit, bit 5, set), 4 bytes in ARM state. */
            regs.pc += (regs.cpsr & BIT(5)) ? 2 : 4;
            err = seL4_TCB_WriteRegisters(tcb, false, 0,
                                          sizeof(regs) / sizeof(regs.pc), &regs);
            assert(!err);
            reply = seL4_MessageInfo_new(0, 0, 0, 0);
            seL4_Reply(reply);
            return 0;
        default:
            printf("Unhandled VCPU fault from [%s]: HSR 0x%08x\n", vm->name, hsr);
            print_ctx_regs(&regs);
            return -1;
        }
    }
    break;

    default:
        /* What? Why are we here? What just happened? */
        printf("Unknown fault from [%s]: label=0x%x length=0x%x\n",
               vm->name, label, length);
        return -1;
    }
    return 0;
}
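/*
 * Sketch of the outer fault loop this dispatcher assumes (the fault_ep
 * field name is an assumption, not taken from the code above): block on
 * the VM's fault endpoint and hand each message to vm_event(), stopping
 * when a fault cannot be handled.
 */
int vm_run(vm_t* vm) {
    while (1) {
        seL4_Word badge;
        seL4_MessageInfo_t tag = seL4_Recv(vm->fault_ep, &badge);
        if (vm_event(vm, tag) < 0) {
            return -1;   /* unhandled fault: stop the VM */
        }
    }
}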
/*! @brief Handles client VM fault messages sent by the kernel.

    Handles the VM fault message by looking up the details of the window that it faulted in,
    and decides whether this fault should be delegated to an external dataspace server for
    paging or content initialisation, or be handled internally by the process server's own
    dataspace implementation for RAM, or is an invalid memory access. In the case of an
    invalid memory access, or if the process server runs out of RAM, the fault cannot be
    handled and the faulting process is blocked indefinitely.

    @param m The received IPC fault message from the kernel.
    @param f The VM fault message info struct.
*/
static void handle_vm_fault(struct procserv_msg *m, struct procserv_vmfault_msg *f) {
    assert(f && f->pcb);
    dvprintf("# Process server received PID %d VM fault\n", f->pcb->pid);
    dvprintf("# %s %s fault at 0x%x, Instruction Pointer 0x%x, Fault Status Register 0x%x\n",
             f->instruction ? "Instruction" : "Data", f->read ? "read" : "write",
             f->faultAddr, f->pc, f->fsr);

    /* Thread should never be fault blocked (or else how did this VM fault even happen?). */
    if (f->pcb->faultReply.capPtr != 0) {
        ROS_ERROR("How did this VM fault even happen? Check book-keeping.\n");
        output_segmentation_fault("Process should already be fault-blocked.", f);
        return;
    }

    /* Check faulting vaddr in segment windows. */
    struct w_associated_window *aw = w_associate_find(&f->pcb->vspace.windows, f->faultAddr);
    if (!aw) {
        output_segmentation_fault("invalid memory window segment", f);
        return;
    }

    /* Retrieve the associated window. */
    struct w_window *window = w_get_window(&procServ.windowList, aw->winID);
    if (!window) {
        output_segmentation_fault("invalid memory window - procserv book-keeping error.", f);
        assert(!"Process server could not find window - book-keeping error.");
        return;
    }

    /* Check window permissions. Note the bitwise AND: access is granted only
       if the corresponding permission bit is actually set in the window. */
    if (f->read && !(window->permissions & W_PERMISSION_READ)) {
        output_segmentation_fault("no read access permission to window.", f);
        return;
    }
    if (!f->read && !(window->permissions & W_PERMISSION_WRITE)) {
        output_segmentation_fault("no write access permission to window.", f);
        return;
    }

    /* Check that there isn't a page entry already mapped. */
    cspacepath_t pageEntry = vs_get_frame(&f->pcb->vspace, f->faultAddr);
    if (pageEntry.capPtr != 0) {
        output_segmentation_fault("entry already occupied; book-keeping error.", f);
        return;
    }

    /* Handle the dispatch request depending on window mode. */
    int error = EINVALID;
    switch (window->mode) {
    case W_MODE_EMPTY:
        output_segmentation_fault("fault in empty window.", f);
        break;
    case W_MODE_ANONYMOUS:
        error = handle_vm_fault_dspace(m, f, aw, window);
        break;
    case W_MODE_PAGER:
        error = handle_vm_fault_pager(m, f, aw, window);
        break;
    default:
        assert(!"Invalid window mode. Process server bug.");
        break;
    }

    /* Reply to the faulting process to unblock it. */
    if (error == ESUCCESS) {
        seL4_Reply(_dispatcherEmptyReply);
    }
}
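/*
 * For reference, a plausible definition of _dispatcherEmptyReply (a sketch;
 * the real declaration is elsewhere and may differ): replying to a fault
 * IPC with a zero-length message is what resumes the faulting thread, so a
 * pre-built empty MessageInfo is all the dispatcher needs.
 */
static seL4_MessageInfo_t _dispatcherEmptyReply;

static void init_dispatcher_reply(void) {
    _dispatcherEmptyReply = seL4_MessageInfo_new(0, 0, 0, 0);
}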
static int test_ep_recycle(env_t env) {
    seL4_MessageInfo_t tag = seL4_MessageInfo_new(0, 0, 0, 0);
    struct {
        helper_thread_t thread;
        seL4_CPtr badged_ep;
        seL4_CPtr derived_badged_ep;
        volatile seL4_Word done;
    } senders[NUM_BADGED_CLIENTS];
    helper_thread_t bouncer;
    seL4_CPtr bounce_ep;
    UNUSED int error;
    seL4_CPtr ep;

    /* Create the master endpoint. */
    ep = vka_alloc_endpoint_leaky(&env->vka);

    /* Create N badged endpoints, and derive each of them. */
    for (int i = 0; i < NUM_BADGED_CLIENTS; i++) {
        senders[i].badged_ep = get_free_slot(env);
        assert(senders[i].badged_ep != 0);

        senders[i].derived_badged_ep = get_free_slot(env);
        assert(senders[i].derived_badged_ep != 0);

        seL4_CapData_t cap_data;
        cap_data = seL4_CapData_Badge_new(i + 200);
        error = cnode_mint(env, ep, senders[i].badged_ep, seL4_AllRights, cap_data);
        assert(!error);

        error = cnode_copy(env, senders[i].badged_ep, senders[i].derived_badged_ep, seL4_AllRights);
        assert(!error);

        create_helper_thread(env, &senders[i].thread);
        set_helper_priority(&senders[i].thread, 100);

        senders[i].done = -1;
    }

    /* Create a bounce thread so we can get lower prio threads to run. */
    bounce_ep = vka_alloc_endpoint_leaky(&env->vka);
    create_helper_thread(env, &bouncer);
    set_helper_priority(&bouncer, 0);
    start_helper(env, &bouncer, bouncer_func, bounce_ep, 0, 0, 0);

    for (int i = 0; i < NUM_BADGED_CLIENTS; i++) {
        start_helper(env, &senders[i].thread, (helper_fn_t) call_func,
                     senders[i].derived_badged_ep, i + 100, (seL4_Word) &senders[i].done, 0);
    }

    /* Let the sender threads run. */
    seL4_Call(bounce_ep, tag);

    /* Receive a message from each endpoint and check the badge. */
    for (int i = 0; i < NUM_BADGED_CLIENTS; i++) {
        seL4_Word sender_badge;
        seL4_MessageInfo_ptr_set_length(&tag, 1);
        tag = seL4_Recv(ep, &sender_badge);
        assert(seL4_MessageInfo_get_length(tag) == 1);
        assert(seL4_GetMR(0) == sender_badge - 100);
        seL4_SetMR(0, ~seL4_GetMR(0));
        seL4_Reply(tag);
    }

    /* Let the sender threads run. */
    seL4_Call(bounce_ep, tag);

    /* Check none of the threads have failed yet. */
    for (int i = 0; i < NUM_BADGED_CLIENTS; i++) {
        assert(senders[i].done == 0);
    }

    /* Recycle each endpoint. */
    for (int i = 0; i < NUM_BADGED_CLIENTS; i++) {
        error = cnode_recycle(env, senders[i].badged_ep);
        assert(!error);

        /* Let thread run. */
        seL4_Call(bounce_ep, tag);

        /* Check that only the intended threads have now aborted. */
        for (int j = 0; j < NUM_BADGED_CLIENTS; j++) {
            if (j <= i) {
                assert(senders[j].done == 1);
            } else {
                assert(senders[j].done == 0);
            }
        }
    }
    seL4_Call(bounce_ep, tag);

    for (int i = 0; i < NUM_BADGED_CLIENTS; i++) {
        cleanup_helper(env, &senders[i].thread);
    }
    cleanup_helper(env, &bouncer);

    return sel4test_get_result();
}
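/*
 * A plausible bouncer_func for reference (a sketch; the real helper may
 * differ): it follows the same "bouncer" pattern as ipc_test_helper_0
 * above. Running at priority 0, it bounces every call straight back, so
 * a seL4_Call() on bounce_ep returns only once all the higher-priority
 * sender threads have run and blocked.
 */
static int bouncer_func(seL4_Word ep, seL4_Word arg1, seL4_Word arg2, seL4_Word arg3) {
    seL4_MessageInfo_t tag = seL4_MessageInfo_new(0, 0, 0, 0);
    while (1) {
        seL4_Word badge;
        seL4_Recv(ep, &badge);
        seL4_Reply(tag);
    }
    return 0;
}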
/*
 * mmap service
 */
int mmap_main (void) {
    while (1) {
        seL4_Word badge = 0;
        seL4_Wait (_mmap_ep, &badge);
        int do_reply = false;

        if (badge == 0) {
            seL4_Word method = seL4_GetMR (0);
            if (method == MMAP_REQUEST) {
                /* queue request from root server */
                do_reply = mmap_queue_schedule (
                    seL4_GetMR (1), seL4_GetMR (2),
                    (struct frameinfo*)seL4_GetMR (3),
                    (void*)seL4_GetMR (4),
                    (struct pawpaw_event*)seL4_GetMR (5));

                if (do_reply) {
                    seL4_Notify (rootserver_async_cap, MMAP_IRQ);
                }
            } else if (method == MMAP_RESULT) {
                /* root server wanted to read some data out of our queue */
                seL4_MessageInfo_t reply = seL4_MessageInfo_new (0, 0, 0, 3);

                if (!done_queue) {
                    seL4_SetMR (0, 0);
                    seL4_SetMR (1, 0);
                    seL4_SetMR (2, 0);
                } else {
                    seL4_SetMR (0, (seL4_Word)done_queue->cb);
                    seL4_SetMR (1, (seL4_Word)done_queue->evt);
                    seL4_SetMR (2, (seL4_Word)done_queue->frame);

                    struct mmap_queue_item* cur = done_queue;
                    done_queue = done_queue->next;
                    free (cur);
                }

                seL4_Reply (reply);
            } else {
                panic ("unknown request from rootsvr\n");
            }
        } else {
            /* response from filesystem */
            struct frameinfo* evt_id = (struct frameinfo*)seL4_GetMR (1);

            /* find the matching mmap request */
            struct mmap_queue_item* q = mmap_queue;
            while (q) {
                /* FIXME: ensure amount (MR0) == PAGE_SIZE or needed amount */
                if (q->frame == evt_id) {
                    q = mmap_move_done (q);
                    break;
                }
                q = q->next;
            }

            /* read finished, notify server if we found one */
            if (q) {
                seL4_Notify (rootserver_async_cap, MMAP_IRQ);
            }
        }
    }

    return 0;
}
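/*
 * Root-server side of the MMAP_RESULT drain, as a sketch (the function name
 * is invented; it assumes the root server holds an unbadged cap to the mmap
 * endpoint, so its requests arrive with badge == 0 as the loop above checks):
 * after receiving MMAP_IRQ, it calls back in repeatedly, popping one finished
 * request per call until MR0 comes back 0 (empty done queue).
 */
void example_drain_mmap_results (seL4_CPtr mmap_ep) {
    while (1) {
        seL4_MessageInfo_t msg = seL4_MessageInfo_new (0, 0, 0, 1);
        seL4_SetMR (0, MMAP_RESULT);
        seL4_Call (mmap_ep, msg);
        if (seL4_GetMR (0) == 0) {
            break;  /* done queue is empty */
        }
        /* MR0..MR2 hold cb, evt, frame for one completed mmap request */
    }
}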