/*
 * Child thread for the cspace-revocation IPC test.
 *
 * Signals readiness to the parent via ep0, then — after the parent has
 * swapped our cspace out from under us — verifies that every plausible
 * cap slot other than ep1 fails to resolve, before reporting success
 * through ep1.
 */
static int ipc_caller(seL4_Word ep0, seL4_Word ep1, seL4_Word word_bits,
                      seL4_Word arg4)
{
    /* Tell the parent we are up and running. */
    seL4_MessageInfo_t tag = seL4_MessageInfo_new(0, 0, 0, 1);
    seL4_SetMR(0, READY_MAGIC);
    seL4_Send(ep0, tag);

    /*
     * The parent has changed our cspace on us. Check that it makes sense.
     *
     * Basically the entire cspace should be empty except for the cap at
     * ep0. We should still test that various points in the cspace resolve
     * correctly.
     */

    /* None of the typical low-numbered endpoint slots should be valid. */
    for (unsigned long slot = 0; slot < word_bits; slot++) {
        seL4_MessageInfo_ptr_new(&tag, 0, 0, 0, 0);
        tag = seL4_Call(slot, tag);
        test_assert(seL4_MessageInfo_get_label(tag) == seL4_InvalidCapability);
    }

    /* Flipping any single bit of ep1 must also yield an invalid cap. */
    for (unsigned long bit = 0; bit < word_bits; bit++) {
        seL4_MessageInfo_ptr_new(&tag, 0, 0, 0, 0);
        tag = seL4_Call(ep1 ^ BIT(bit), tag);
        test_assert(seL4_MessageInfo_get_label(tag) == seL4_InvalidCapability);
    }

    /* And we're done. This should be a valid cap and get us out of here! */
    seL4_MessageInfo_ptr_new(&tag, 0, 0, 0, 1);
    seL4_SetMR(0, SUCCESS_MAGIC);
    seL4_Send(ep1, tag);

    return sel4test_get_result();
}
/*! @brief Handle messages received by the CPIO file server.

    Tries each dispatcher in turn until one claims the message; unknown
    messages are logged and rejected.

    @param s The global file server state. (No ownership transfer)
    @param msg The received message. (No ownership transfer)
    @return DISPATCH_SUCCESS if message dispatched, DISPATCH_ERROR if unknown message.
*/
static int fileserv_handle_message(struct fs_state *s, srv_msg_t *msg)
{
    void *userptr;
    int outcome;
    int label = seL4_GetMR(0);
    (void) outcome;

    /* Async notifications need no RPC decoding. */
    if (dispatch_notification(msg) == DISPATCH_SUCCESS) {
        return DISPATCH_SUCCESS;
    }

    /* Server-control interface. */
    if (check_dispatch_serv(msg, &userptr) == DISPATCH_SUCCESS) {
        outcome = rpc_sv_serv_dispatcher(userptr, label);
        assert(outcome == DISPATCH_SUCCESS);
        return DISPATCH_SUCCESS;
    }

    /* Dataspace interface. */
    if (check_dispatch_data(msg, &userptr) == DISPATCH_SUCCESS) {
        outcome = rpc_sv_data_dispatcher(userptr, label);
        assert(outcome == DISPATCH_SUCCESS);
        return DISPATCH_SUCCESS;
    }

    /* No dispatcher recognised this message. */
    dprintf("Unknown message (badge = %d msgInfo = %d label = %d (0x%x)).\n",
            msg->badge, seL4_MessageInfo_get_label(msg->message), label, label);
    ROS_ERROR("File server unknown message.");
    assert(!"File server unknown message.");
    return DISPATCH_ERROR;
}
/*
 * Fault-handling event loop for the GDB server interface (CAmkES template;
 * the template markers are substituted at build time).
 *
 * Waits on the fault endpoint for a faulting thread, records the fault
 * details into gdb_state, saves the reply cap, hands control to the GDB
 * client, and finally replies to resume (or single-step) the thread.
 * Never returns.
 */
int /*? me.interface.name ?*/__run(void) {
    seL4_Word fault_type;
    seL4_Word length;
    seL4_MessageInfo_t info;
    seL4_Word args[4];
    seL4_Word reply_cap = /*? reply_cap_slot ?*/;
    while (1) {
        /* Wait for fault */
        info = seL4_Recv(/*? ep ?*/, &gdb_state.current_thread_tcb);
        /* Get the relevant registers */
        fault_type = seL4_MessageInfo_get_label(info);
        length = seL4_MessageInfo_get_length(info);
        /* NOTE(review): args[] holds only 4 words but `length` comes from
         * the message header — assumes the kernel sends at most 4 MRs for
         * the fault types handled here; confirm against the fault ABI. */
        for (int i = 0; i < length; i++) {
            args[i] = seL4_GetMR(i);
        }
        /* MR0 of a fault message carries the faulting PC. */
        gdb_state.current_pc = args[0];
        ZF_LOGD("------------------------------");
        ZF_LOGD("Received fault for tcb %zu", gdb_state.current_thread_tcb);
        ZF_LOGD("Stopped at %zx", gdb_state.current_pc);
        ZF_LOGD("Length: %zu", length);
        // Save the reply cap so the thread can be resumed after the GDB
        // client has finished handling this stop.
        seL4_CNode_SaveCaller(/*? cnode ?*/, reply_cap, 32);
        gdb_state.stop_reason = find_stop_reason(fault_type, args);
        gdb_state.current_thread_step_mode = false;
        /* Send fault message to gdb client */
        gdb_handle_fault(&gdb_state);
        /* Wait for gdb client to deal with fault */
        int UNUSED error = b_wait();
        /* Reply to the fault ep to restart the thread.
           We look inside the gdb_state struct to interpret how to restart
           the thread. */
        if (gdb_state.stop_reason == stop_step && gdb_state.current_thread_step_mode == false) {
            /* If this was a Debug Exception, then we respond with
               a bp_num and the number of instruction to step
               Since we're going to continue, we set MR0 to 0 */
            info = seL4_MessageInfo_new(0, 0, 0, 1);
            seL4_SetMR(0, 0);
            seL4_Send(reply_cap, info);
        } else if (gdb_state.stop_reason == stop_none) {
            /* If this was a fault, set the instruction pointer to
               what we expect it to be */
            info = seL4_MessageInfo_new(0, 0, 0, 1);
            seL4_SetMR(0, gdb_state.current_pc);
            seL4_Send(reply_cap, info);
        } else {
            ZF_LOGD("Responding to some other debug exception %d", gdb_state.stop_reason);
            /* Other stop reasons need no message payload: just signal. */
            seL4_Signal(reply_cap);
        }
    }
    UNREACHABLE();
}
/* Decide whether a received message should be handled as a process VM
 * fault: the label must be seL4_Fault_VMFault and the badge must identify
 * a PID. Returns DISPATCH_SUCCESS to claim the message, DISPATCH_PASS to
 * hand it to the next dispatcher. */
int check_dispatch_fault(struct procserv_msg *m, void **userptr)
{
    (void) userptr;

    if (seL4_MessageInfo_get_label(m->message) == seL4_Fault_VMFault &&
            dispatcher_badge_PID(m->badge)) {
        return DISPATCH_SUCCESS;
    }

    /* Not a VM fault, pass onto next dispatcher. */
    return DISPATCH_PASS;
}
/** Performs the IPC register setup for a write() call to the server.
 *
 * The server returns the number of bytes it wrote to the serial device in
 * a message register, and an error code in the "label" field of the reply
 * header.
 *
 * @param conn Initialized connection token returned by
 *             serial_server_client_connect().
 * @param len Length of the data in the buffer.
 * @return Number of bytes written on success, or a negated seL4 error
 *         value on failure.
 */
static ssize_t serial_server_write_ipc_invoke(serial_client_context_t *conn,
                                              ssize_t len)
{
    seL4_MessageInfo_t tag;
    seL4_Word reply_label;

    /* Marshal the request: function ID plus buffer length. */
    seL4_SetMR(SSMSGREG_FUNC, FUNC_WRITE_REQ);
    seL4_SetMR(SSMSGREG_WRITE_REQ_BUFF_LEN, len);
    tag = seL4_MessageInfo_new(0, 0, 0, SSMSGREG_WRITE_REQ_END);
    tag = seL4_Call(conn->badged_server_ep_cspath.capPtr, tag);

    /* The reply's function ID must acknowledge our request type. */
    if (seL4_GetMR(SSMSGREG_FUNC) != FUNC_WRITE_ACK) {
        ZF_LOGE(SERSERVC"printf: Reply message was not a WRITE_ACK as "
                "expected.");
        return - seL4_IllegalOperation;
    }

    /* A non-zero label is the server's error code, negated for the caller. */
    reply_label = seL4_MessageInfo_get_label(tag);
    if (reply_label != 0) {
        return - reply_label;
    }
    return seL4_GetMR(SSMSGREG_WRITE_ACK_N_BYTES_WRITTEN);
}
/* Helper thread: each time it is signalled on sync_ep, calls test_ep and
 * records through *status whether the call resolved a valid cap (1) or
 * failed with seL4_InvalidCapability (0), then replies to the signaller. */
static int ep_test_func(seL4_CPtr sync_ep, seL4_CPtr test_ep,
                        volatile seL4_Word *status, seL4_Word arg4)
{
    seL4_Word badge;
    seL4_MessageInfo_t empty = seL4_MessageInfo_new(0, 0, 0, 0);

    for (;;) {
        seL4_Recv(sync_ep, &badge);
        /* Hit up the test end point */
        seL4_MessageInfo_t outcome = seL4_Call(test_ep, empty);
        /* See what the status was */
        *status = !!(seL4_MessageInfo_get_label(outcome) != seL4_InvalidCapability);
        /* Reply */
        seL4_Reply(empty);
    }

    return sel4test_get_result();
}
long sys_brk(va_list ap) { //printf("sysbrk\n"); uintptr_t newbrk = va_arg(ap, uintptr_t); //printf("newbrk before = %d\n",newbrk); seL4_MessageInfo_t tag = seL4_MessageInfo_new(seL4_NoFault, 0, 0, 2); seL4_SetTag(tag); seL4_SetMR(0, SOS_SYSCALL_SYSBRK); seL4_SetMR(1, newbrk); seL4_MessageInfo_t message = seL4_Call(SYSCALL_ENDPOINT_SLOT, tag); newbrk = seL4_MessageInfo_get_label(message); //printf("newbrk result = %d\n",newbrk); return newbrk; }
/* Block on the IRQ server's delivery endpoint until a message arrives.
 * Messages carrying the server's IRQ label are forwarded to the registered
 * IRQ handlers. The sender badge is written through badge_ret when it is
 * non-NULL; the raw message info is returned to the caller. */
seL4_MessageInfo_t irq_server_wait_for_irq(irq_server_t irq_server,
                                           seL4_Word* badge_ret)
{
    seL4_Word sender;
    seL4_MessageInfo_t info = seL4_Recv(irq_server->delivery_ep, &sender);

    if (badge_ret != NULL) {
        *badge_ret = sender;
    }

    /* Only IRQ-labelled messages get dispatched to the handlers. */
    if (seL4_MessageInfo_get_label(info) == irq_server->label) {
        irq_server_handle_irq_ipc(irq_server);
    }

    return info;
}
/*! @brief Process server IPC message handler. Handles dispatching of all process server IPC messages. Calls each individual dispatcher until the correct dispatcher for the message type has been found. @param s The process server global state. @param msg The process server recieved message info. */ static void proc_server_handle_message(struct procserv_state *s, struct procserv_msg *msg) { int result; int label = seL4_GetMR(0); void *userptr = NULL; (void) result; /* Attempt to dispatch to procserv syscall dispatcher. */ if (check_dispatch_syscall(msg, &userptr) == DISPATCH_SUCCESS) { result = rpc_sv_proc_dispatcher(userptr, label); assert(result == DISPATCH_SUCCESS); mem_syscall_postaction(); proc_syscall_postaction(); return; } /* Attempt to dispatch to VM fault dispatcher. */ if (check_dispatch_fault(msg, &userptr) == DISPATCH_SUCCESS) { result = dispatch_vm_fault(msg, &userptr); assert(result == DISPATCH_SUCCESS); return; } /* Attempt to dispatch to RAM dataspace syscall dispatcher. */ if (check_dispatch_dataspace(msg, &userptr) == DISPATCH_SUCCESS) { result = rpc_sv_data_dispatcher(userptr, label); assert(result == DISPATCH_SUCCESS); mem_syscall_postaction(); return; } /* Attempt to dispatch to nameserv syscall dispatcher. */ if (check_dispatch_nameserv(msg, &userptr) == DISPATCH_SUCCESS) { result = rpc_sv_name_dispatcher(userptr, label); assert(result == DISPATCH_SUCCESS); return; } /* Unknown message. Block calling client indefinitely. */ dprintf("Unknown message (badge = %d msgInfo = %d syscall = 0x%x).\n", msg->badge, seL4_MessageInfo_get_label(msg->message), label); ROS_ERROR("Process server unknown message. ¯\(º_o)/¯"); }
/* Block on `endpoint` until the test driver sends our init data. The
 * message must be a plain (seL4_NoFault) message carrying exactly one MR:
 * the address of the shared test_init_data_t. Sanity-checks the header
 * and a couple of fields before returning the pointer. */
static test_init_data_t *receive_init_data(seL4_CPtr endpoint)
{
    seL4_Word sender;
    UNUSED seL4_MessageInfo_t info = seL4_Recv(endpoint, &sender);

    /* check the label is correct */
    assert(seL4_MessageInfo_get_label(info) == seL4_NoFault);
    assert(seL4_MessageInfo_get_length(info) == 1);

    test_init_data_t *init = (test_init_data_t *) seL4_GetMR(0);
    assert(init->free_slots.start != 0);
    assert(init->free_slots.end != 0);

    return init;
}
/* Ask the serial server to shut itself down. Returns seL4_NoError (0) on
 * success or a seL4 error code otherwise; the server's status is carried
 * in the label of its reply. */
int serial_server_kill(serial_client_context_t *conn)
{
    if (conn == NULL) {
        return seL4_InvalidArgument;
    }

    /* Marshal and send the KILL request. */
    seL4_SetMR(SSMSGREG_FUNC, FUNC_KILL_REQ);
    seL4_MessageInfo_t tag = seL4_MessageInfo_new(0, 0, 0, SSMSGREG_KILL_REQ_END);
    tag = seL4_Call(conn->badged_server_ep_cspath.capPtr, tag);

    if (seL4_GetMR(SSMSGREG_FUNC) != FUNC_KILL_ACK) {
        ZF_LOGE(SERSERVC"kill: Reply message was not a KILL_ACK as expected.");
        return seL4_IllegalOperation;
    }

    return seL4_MessageInfo_get_label(tag);
}
/*! @brief Handle messages received by the timer server.
    @param s The global timer server state. (No ownership transfer)
    @param msg The received message. (No ownership transfer)
    @return DISPATCH_SUCCESS if message dispatched, DISPATCH_ERROR if unknown message.
*/
static int timer_server_handle_message(struct timeserv_state *s, srv_msg_t *msg)
{
    void *userptr;
    int label = seL4_GetMR(0);
    int status = DISPATCH_PASS;

    /* Both async dispatchers always run (a message may satisfy either);
     * if either claims the message we are done. */
    if (dispatch_client_watch(msg) == DISPATCH_SUCCESS) {
        status = DISPATCH_SUCCESS;
    }
    if (dev_dispatch_interrupt(&timeServ.irqState, msg) == DISPATCH_SUCCESS) {
        status = DISPATCH_SUCCESS;
    }
    if (status == DISPATCH_SUCCESS) {
        return status;
    }

    /* Dataspace interface. */
    if (check_dispatch_data(msg, &userptr) == DISPATCH_SUCCESS) {
        status = rpc_sv_data_dispatcher(userptr, label);
        assert(status == DISPATCH_SUCCESS);
        return DISPATCH_SUCCESS;
    }

    /* Server-control interface. */
    if (check_dispatch_serv(msg, &userptr) == DISPATCH_SUCCESS) {
        status = rpc_sv_serv_dispatcher(userptr, label);
        assert(status == DISPATCH_SUCCESS);
        return DISPATCH_SUCCESS;
    }

    /* No dispatcher recognised this message. */
    dprintf("Unknown message (badge = %d msgInfo = %d label = %d).\n",
            msg->badge, seL4_MessageInfo_get_label(msg->message), label);
    ROS_ERROR("timer server unknown message.");
    assert(!"timer server unknown message.");
    return DISPATCH_ERROR;
}
/* Root-server main loop: waits on the single endpoint and demultiplexes
 * interrupts (flagged in the badge bits), VM faults, and system calls.
 * Never returns. */
void syscall_loop(seL4_CPtr ep)
{
    for (;;) {
        seL4_Word badge;
        seL4_MessageInfo_t message = seL4_Wait(ep, &badge);
        seL4_Word label = seL4_MessageInfo_get_label(message);

        if (badge & IRQ_EP_BADGE) {
            /* Interrupt: a single badge may carry several IRQ sources. */
            if (badge & IRQ_BADGE_NETWORK) {
                network_irq();
            }
            if (badge & IRQ_BADGE_TIMER) {
                int ret = timer_interrupt();
                if (ret != CLOCK_R_OK) {
                    /* What now? */
                }
            }
        } else if (label == seL4_VMFault) {
            /* Page fault */
            dprintf(3, "user with pid = %d, 0x%08x is having a vmfault\n",
                    badge & ~USER_EP_BADGE, badge);
            set_cur_proc(badge & ~USER_EP_BADGE);
            handle_pagefault();
        } else if (label == seL4_NoFault) {
            /* System call: pass the number of payload words (total message
             * length minus one). */
            dprintf(3, "user with pid = %d, 0x%08x is making a syscall\n",
                    badge & ~USER_EP_BADGE, badge);
            set_cur_proc(badge & ~USER_EP_BADGE);
            handle_syscall(badge, seL4_MessageInfo_get_length(message) - 1);
        } else {
            dprintf(3, "Rootserver got an unknown message\n");
        }
    }
}
/* Test helper: calls `ep` twice with `msg`. The first call must succeed
 * and echo back the bitwise complement ~msg; by the time of the second
 * call the cap is expected to have been revoked, so the call should fail
 * with seL4_InvalidCapability. *done is toggled so the revoking thread
 * can observe progress. */
static int call_func(seL4_CPtr ep, seL4_Word msg, volatile seL4_Word *done,
                     seL4_Word arg3)
{
    seL4_MessageInfo_t info = seL4_MessageInfo_new(0, 0, 0, 1);

    /* First call: expect a one-word echo of the complement. */
    seL4_SetMR(0, msg);
    info = seL4_Call(ep, info);
    test_check(seL4_MessageInfo_get_length(info) == 1);
    test_check(seL4_GetMR(0) == ~msg);
    *done = 0;

    /* Second call: should (eventually) fault once the cap is revoked. */
    seL4_SetMR(0, msg);
    info = seL4_Call(ep, info);
    test_check(seL4_MessageInfo_get_label(info) == seL4_InvalidCapability);
    *done = 1;

    return sel4test_get_result();
}
/*
 * Top-level VMM entry point: initialises the VMM, creates and boots the
 * guest VM, then services fault/IRQ/VUSB events forever. Fatal errors
 * halt the system via seL4_DebugHalt().
 */
int main_continued(void)
{
    vm_t vm;
    int err;

    /* setup for restart with a setjmp */
    /* NOTE(review): a longjmp back here re-enters with a non-zero value,
     * so reset_resources() runs once per restart before falling through. */
    while (setjmp(restart_jmp_buf) != 0) {
        reset_resources();
    }
    restart_tcb = camkes_get_tls()->tcb_cap;
    restart_event_reg_callback(restart_event, NULL);

    err = vmm_init();
    assert(!err);

    print_cpio_info();

    /* Create the VM */
    err = vm_create(VM_NAME, VM_PRIO, _fault_endpoint, VM_BADGE,
                    &_vka, &_simple, &_vspace, &_io_ops, &vm);
    if (err) {
        printf("Failed to create VM\n");
        seL4_DebugHalt();
        return -1;
    }

    /* HACK: See if we have a "RAM device" for 1-1 mappings */
    map_unity_ram(&vm);

    /* Load system images */
    printf("Loading Linux: \'%s\' dtb: \'%s\'\n", VM_LINUX_NAME, VM_LINUX_DTB_NAME);
    err = load_linux(&vm, VM_LINUX_NAME, VM_LINUX_DTB_NAME);
    if (err) {
        printf("Failed to load VM image\n");
        seL4_DebugHalt();
        return -1;
    }

    vm_vchan_setup(&vm);

    /* Power on */
    printf("Starting VM\n\n");
    err = vm_start(&vm);
    if (err) {
        printf("Failed to start VM\n");
        seL4_DebugHalt();
        return -1;
    }

    /* Loop forever, handling events */
    while (1) {
        seL4_MessageInfo_t tag;
        seL4_Word sender_badge;

        tag = seL4_Wait(_fault_endpoint, &sender_badge);
        if (sender_badge == 0) {
            /* Badge 0: message from our own IRQ server thread. */
            seL4_Word label;
            label = seL4_MessageInfo_get_label(tag);
            if (label == IRQ_MESSAGE_LABEL) {
                irq_server_handle_irq_ipc(_irq_server);
            } else {
                printf("Unknown label (%d) for IPC badge %d\n", label, sender_badge);
            }
        } else if (sender_badge == VUSB_NBADGE) {
            /* Virtual USB notification. */
            vusb_notify();
        } else {
            /* Anything else must be a fault/exception from the guest. */
            assert(sender_badge == VM_BADGE);
            err = vm_event(&vm, tag);
            if (err) {
                /* Shutdown */
                vm_stop(&vm);
                seL4_DebugHalt();
                while (1);
            }
        }
    }

    return 0;
}
/*
 * Establish a connection (and shared-memory link) with the serial server
 * behind `badged_server_ep_cap`.
 *
 * Allocates SERIAL_SERVER_SHMEM_MAX_SIZE bytes of fresh pages in the
 * client's vspace, marshals each page's backing Frame cap to the server
 * via IPC cap transfer, and asks the server to map them on its side.
 * On failure after allocation, the shmem pages are unmapped again.
 *
 * @param badged_server_ep_cap Badged endpoint cap to the server.
 * @param client_vka Allocator used to look up Frame cap paths.
 * @param client_vspace Client vspace in which the shmem is allocated.
 * @param conn Out param: initialised connection token on success.
 * @return seL4_NoError on success, otherwise a seL4/server error code.
 */
int serial_server_client_connect(seL4_CPtr badged_server_ep_cap,
                                 vka_t *client_vka,
                                 vspace_t *client_vspace,
                                 serial_client_context_t *conn)
{
    seL4_Error error;
    int shmem_n_pages;
    uintptr_t shmem_tmp_vaddr;
    seL4_MessageInfo_t tag;
    cspacepath_t frame_cspath;

    if (badged_server_ep_cap == 0 || client_vka == NULL || client_vspace == NULL
        || conn == NULL) {
        return seL4_InvalidArgument;
    }

    memset(conn, 0, sizeof(serial_client_context_t));
    shmem_n_pages = BYTES_TO_4K_PAGES(SERIAL_SERVER_SHMEM_MAX_SIZE);
    /* Every page's Frame cap travels in a single IPC, so the page count
     * must fit within the message's extra-caps limit. */
    if (shmem_n_pages > seL4_MsgMaxExtraCaps) {
        ZF_LOGE(SERSERVC"connect: Currently unsupported shared memory size: "
                "IPC cap transfer capability is inadequate.");
        return seL4_RangeError;
    }

    conn->shmem = vspace_new_pages(client_vspace, seL4_AllRights,
                                   shmem_n_pages,
                                   seL4_PageBits);
    if (conn->shmem == NULL) {
        ZF_LOGE(SERSERVC"connect: Failed to alloc shmem.");
        return seL4_NotEnoughMemory;
    }
    assert(IS_ALIGNED((uintptr_t)conn->shmem, seL4_PageBits));

    /* Look up the Frame cap behind each page in the shmem range, and marshal
     * all of those Frame caps to the parent. The parent will then map those
     * Frames into its VSpace and establish a shmem link.
     */
    shmem_tmp_vaddr = (uintptr_t)conn->shmem;
    for (int i = 0; i < shmem_n_pages; i++) {
        vka_cspace_make_path(client_vka,
                             vspace_get_cap(client_vspace,
                                            (void *)shmem_tmp_vaddr),
                             &frame_cspath);

        seL4_SetCap(i, frame_cspath.capPtr);
        shmem_tmp_vaddr += BIT(seL4_PageBits);
    }

    /* Call the server asking it to establish the shmem mapping with us, and
     * get us connected up.
     */
    seL4_SetMR(SSMSGREG_FUNC, FUNC_CONNECT_REQ);
    seL4_SetMR(SSMSGREG_CONNECT_REQ_SHMEM_SIZE, SERIAL_SERVER_SHMEM_MAX_SIZE);
    /* extraCaps doubles up as the number of shmem pages. */
    tag = seL4_MessageInfo_new(0, 0, shmem_n_pages, SSMSGREG_CONNECT_REQ_END);
    tag = seL4_Call(badged_server_ep_cap, tag);

    /* It makes sense to verify that the message we're getting back is an
     * ACK response to our request message.
     */
    if (seL4_GetMR(SSMSGREG_FUNC) != FUNC_CONNECT_ACK) {
        error = seL4_IllegalOperation;
        ZF_LOGE(SERSERVC"connect: Reply message was not a CONNECT_ACK as "
                "expected.");
        goto out;
    }
    /* When the parent replies, we check to see if it was successful, etc. */
    error = seL4_MessageInfo_get_label(tag);
    if (error != (int)SERIAL_SERVER_NOERROR) {
        ZF_LOGE(SERSERVC"connect ERR %d: Failed to connect to the server.",
                error);
        if (error == (int)SERIAL_SERVER_ERROR_SHMEM_TOO_LARGE) {
            ZF_LOGE(SERSERVC"connect: Your requested shmem mapping size is too "
                    "large.\n\tServer's max shmem size is %luB.",
                    (long)seL4_GetMR(SSMSGREG_CONNECT_ACK_MAX_SHMEM_SIZE));
        }
        goto out;
    }

    conn->shmem_size = SERIAL_SERVER_SHMEM_MAX_SIZE;
    vka_cspace_make_path(client_vka, badged_server_ep_cap,
                         &conn->badged_server_ep_cspath);
    return seL4_NoError;

out:
    /* Roll back the shmem mapping on any failure after allocation. */
    if (conn->shmem != NULL) {
        vspace_unmap_pages(client_vspace, (void *)conn->shmem, shmem_n_pages,
                           seL4_PageBits, VSPACE_FREE);
    }
    return error;
}
/*
 * seL4 Call fastpath.
 *
 * Handles the common case of seL4_Call on an endpoint with a receiver
 * already waiting, avoiding the full slowpath. Every condition the
 * fastpath cannot satisfy falls through to slowpath(SysCall), which does
 * not return here. After the "point of no return" the IPC is committed
 * and control switches directly to the destination thread via
 * fastpath_restore().
 */
void
#ifdef ARCH_X86
NORETURN
#endif
fastpath_call(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    tcb_t *dest;
    word_t badge;
    cte_t *replySlot, *callerSlot;
    cap_t newVTable;
    pde_t *cap_pd;
    pde_t stored_hw_asid;
    word_t fault_type;

    /* Get message info, length, and fault type. */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = fault_get_faultType(ksCurThread->tcbFault);

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.path = Entry_Syscall;
    ksKernelEntry.syscall_no = SysCall;
    ksKernelEntry.cap_type = cap_endpoint_cap;
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
    ksKernelEntry.is_fastpath = true;
    benchmark_track_start();
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_kentry_stamp();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    /* Check there's no extra caps, the length is ok and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != fault_null_fault)) {
        slowpath(SysCall);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(ksCurThread, tcbCTable)->cap, cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanSend(ep_cap))) {
        slowpath(SysCall);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Get the destination thread, which is only going to be valid
     * if the endpoint is valid. */
    dest = TCB_PTR(endpoint_ptr_get_epQueue_head(ep_ptr));

    /* Check that there's a thread waiting to receive */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) != EPState_Recv)) {
        slowpath(SysCall);
    }

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(dest, tcbVTable)->cap;

    /* Get vspace root. */
#if defined(ARCH_ARM) || !defined(CONFIG_PAE_PAGING)
    cap_pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(newVTable));
#else
    cap_pd = PDE_PTR(cap_pdpt_cap_get_capPDPTBasePtr(newVTable));
#endif

    /* Ensure that the destination has a valid VTable. */
    if (unlikely(! isValidVTableRoot_fp(newVTable))) {
        slowpath(SysCall);
    }

#ifdef ARCH_ARM
    /* Get HW ASID */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

    /* Ensure the destination has a higher/equal priority to us. */
    if (unlikely(dest->tcbPriority < ksCurThread->tcbPriority)) {
        slowpath(SysCall);
    }

    /* Ensure that the endpoint has has grant rights so that we can
     * create the reply cap */
    if (unlikely(!cap_endpoint_cap_get_capCanGrant(ep_cap))) {
        slowpath(SysCall);
    }

#ifdef ARCH_ARM
    /* The stored HW ASID must be valid for the direct thread switch. */
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysCall);
    }
#endif

    /* Ensure the original caller is in the current domain and can be scheduled directly. */
    if (unlikely(dest->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysCall);
    }

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef ARCH_X86
    /* Need to update NextIP in the calling thread */
    setRegister(ksCurThread, NextIP, getRegister(ksCurThread, NextIP) + 2);
#endif

    /* Dequeue the destination. */
    endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(dest->tcbEPNext));
    if (unlikely(dest->tcbEPNext)) {
        dest->tcbEPNext->tcbEPPrev = NULL;
    } else {
        /* Queue is now empty: mark the endpoint idle again. */
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, 0, EPState_Idle);
    }

    badge = cap_endpoint_cap_get_capEPBadge(ep_cap);

    /* Block sender */
    thread_state_ptr_set_tsType_np(&ksCurThread->tcbState,
                                   ThreadState_BlockedOnReply);

    /* Get sender reply slot */
    replySlot = TCB_PTR_CTE_PTR(ksCurThread, tcbReply);

    /* Get dest caller slot */
    callerSlot = TCB_PTR_CTE_PTR(dest, tcbCaller);

    /* Insert reply cap */
    cap_reply_cap_ptr_new_np(&callerSlot->cap, 0, TCB_REF(ksCurThread));
    mdb_node_ptr_set_mdbPrev_np(&callerSlot->cteMDBNode, CTE_REF(replySlot));
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &replySlot->cteMDBNode, CTE_REF(callerSlot), 1, 1);

    fastpath_copy_mrs (length, ksCurThread, dest);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&dest->tcbState, ThreadState_Running);
    switchToThread_fp(dest, cap_pd, stored_hw_asid);

    /* Caps are never unwrapped on the fastpath. */
    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    fastpath_restore(badge, msgInfo, ksCurThread);
}
/*
 * seL4 ReplyRecv fastpath.
 *
 * Handles the common case of seL4_ReplyRecv: reply to the caller via the
 * TCB's reply cap, then block on the endpoint waiting for the next
 * message. Any condition the fastpath cannot satisfy falls through to
 * slowpath(SysReplyRecv), which does not return here. After the "point of
 * no return" the reply is committed and control switches directly to the
 * caller via fastpath_restore().
 */
void
fastpath_reply_recv(word_t cptr, word_t msgInfo)
{
    seL4_MessageInfo_t info;
    cap_t ep_cap;
    endpoint_t *ep_ptr;
    word_t length;
    cte_t *callerSlot;
    cap_t callerCap;
    tcb_t *caller;
    word_t badge;
    tcb_t *endpointTail;
    word_t fault_type;

    cap_t newVTable;
    pde_t *cap_pd;
    pde_t stored_hw_asid;

    /* Get message info and length */
    info = messageInfoFromWord_raw(msgInfo);
    length = seL4_MessageInfo_get_length(info);
    fault_type = fault_get_faultType(ksCurThread->tcbFault);

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    ksKernelEntry.path = Entry_Syscall;
    ksKernelEntry.syscall_no = SysReplyRecv;
    ksKernelEntry.cap_type = cap_endpoint_cap;
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
    ksKernelEntry.is_fastpath = true;
    benchmark_track_start();
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_kentry_stamp();
#endif /* CONFIG_BENCHMARK_TRACK_UTILISATION */

    /* Check there's no extra caps, the length is ok and there's no
     * saved fault. */
    if (unlikely(fastpath_mi_check(msgInfo) ||
                 fault_type != fault_null_fault)) {
        slowpath(SysReplyRecv);
    }

    /* Lookup the cap */
    ep_cap = lookup_fp(TCB_PTR_CTE_PTR(ksCurThread, tcbCTable)->cap,
                       cptr);

    /* Check it's an endpoint */
    if (unlikely(!cap_capType_equals(ep_cap, cap_endpoint_cap) ||
                 !cap_endpoint_cap_get_capCanReceive(ep_cap))) {
        slowpath(SysReplyRecv);
    }

    /* Check there is nothing waiting on the notification */
    if (ksCurThread->tcbBoundNotification &&
            notification_ptr_get_state(ksCurThread->tcbBoundNotification) == NtfnState_Active) {
        slowpath(SysReplyRecv);
    }

    /* Get the endpoint address */
    ep_ptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(ep_cap));

    /* Check that there's not a thread waiting to send */
    if (unlikely(endpoint_ptr_get_state(ep_ptr) == EPState_Send)) {
        slowpath(SysReplyRecv);
    }

    /* Only reply if the reply cap is valid. */
    callerSlot = TCB_PTR_CTE_PTR(ksCurThread, tcbCaller);
    callerCap = callerSlot->cap;
    if (unlikely(!fastpath_reply_cap_check(callerCap))) {
        slowpath(SysReplyRecv);
    }

    /* Determine who the caller is. */
    caller = TCB_PTR(cap_reply_cap_get_capTCBPtr(callerCap));

    /* Check that the caller has not faulted, in which case a fault
       reply is generated instead. */
    fault_type = fault_get_faultType(caller->tcbFault);
    if (unlikely(fault_type != fault_null_fault)) {
        slowpath(SysReplyRecv);
    }

    /* Get destination thread.*/
    newVTable = TCB_PTR_CTE_PTR(caller, tcbVTable)->cap;

    /* Get vspace root. */
#if defined(ARCH_ARM) || !defined(CONFIG_PAE_PAGING)
    cap_pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(newVTable));
#else
    cap_pd = PDE_PTR(cap_pdpt_cap_get_capPDPTBasePtr(newVTable));
#endif

    /* Ensure that the destination has a valid MMU. */
    if (unlikely(! isValidVTableRoot_fp (newVTable))) {
        slowpath(SysReplyRecv);
    }

#ifdef ARCH_ARM
    /* Get HWASID. */
    stored_hw_asid = cap_pd[PD_ASID_SLOT];
#endif

    /* Ensure the original caller can be scheduled directly. */
    if (unlikely(caller->tcbPriority < ksCurThread->tcbPriority)) {
        slowpath(SysReplyRecv);
    }

#ifdef ARCH_ARM
    /* Ensure the HWASID is valid. */
    if (unlikely(!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid))) {
        slowpath(SysReplyRecv);
    }
#endif

    /* Ensure the original caller is in the current domain and can be scheduled directly. */
    if (unlikely(caller->tcbDomain != ksCurDomain && maxDom)) {
        slowpath(SysReplyRecv);
    }

    /*
     * --- POINT OF NO RETURN ---
     *
     * At this stage, we have committed to performing the IPC.
     */

#ifdef ARCH_X86
    /* Need to update NextIP in the calling thread */
    setRegister(ksCurThread, NextIP, getRegister(ksCurThread, NextIP) + 2);
#endif

    /* Set thread state to BlockedOnReceive */
    thread_state_ptr_mset_blockingObject_tsType(
        &ksCurThread->tcbState, (word_t)ep_ptr, ThreadState_BlockedOnReceive);

    /* Place the thread in the endpoint queue */
    endpointTail = TCB_PTR(endpoint_ptr_get_epQueue_tail(ep_ptr));
    if (likely(!endpointTail)) {
        /* Queue was empty: we become both head and tail. */
        ksCurThread->tcbEPPrev = NULL;
        ksCurThread->tcbEPNext = NULL;

        /* Set head/tail of queue and endpoint state. */
        endpoint_ptr_set_epQueue_head_np(ep_ptr, TCB_REF(ksCurThread));
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(ksCurThread),
                                             EPState_Recv);
    } else {
        /* Append current thread onto the queue. */
        endpointTail->tcbEPNext = ksCurThread;
        ksCurThread->tcbEPPrev = endpointTail;
        ksCurThread->tcbEPNext = NULL;

        /* Update tail of queue. */
        endpoint_ptr_mset_epQueue_tail_state(ep_ptr, TCB_REF(ksCurThread),
                                             EPState_Recv);
    }

    /* Delete the reply cap. */
    mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
        &CTE_PTR(mdb_node_get_mdbPrev(callerSlot->cteMDBNode))->cteMDBNode,
        0, 1, 1);
    callerSlot->cap = cap_null_cap_new();
    callerSlot->cteMDBNode = nullMDBNode;

    /* I know there's no fault, so straight to the transfer. */

    /* Replies don't have a badge. */
    badge = 0;

    fastpath_copy_mrs (length, ksCurThread, caller);

    /* Dest thread is set Running, but not queued. */
    thread_state_ptr_set_tsType_np(&caller->tcbState, ThreadState_Running);
    switchToThread_fp(caller, cap_pd, stored_hw_asid);

    /* Caps are never unwrapped on the fastpath. */
    msgInfo = wordFromMessageInfo(seL4_MessageInfo_set_capsUnwrapped(info, 0));

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
    benchmark_track_exit();
#endif

    fastpath_restore(badge, msgInfo, ksCurThread);
}
/*
 * Interpret a reply message sent by a fault handler (sender) to a
 * faulting thread (receiver), copying back any registers the reply
 * carries, and decide whether the receiver may be restarted.
 *
 * @param receiver Thread whose saved fault is being replied to.
 * @param sender   Thread sending the reply message.
 * @return true to restart the receiver, false to leave it suspended.
 */
bool_t
handleFaultReply(tcb_t *receiver, tcb_t *sender)
{
    /* These lookups are moved inward from doReplyTransfer */
    seL4_MessageInfo_t tag = messageInfoFromWord(getRegister(sender, msgInfoRegister));
    word_t label = seL4_MessageInfo_get_label(tag);
    word_t length = seL4_MessageInfo_get_length(tag);
    seL4_Fault_t fault = receiver->tcbFault;

    switch (seL4_Fault_get_seL4_FaultType(fault)) {
    case seL4_Fault_CapFault:
        /* Cap faults carry no reply registers; always restart. */
        return true;

    case seL4_Fault_UnknownSyscall:
        copyMRsFaultReply(sender, receiver, MessageID_Syscall,
                          MIN(length, n_syscallMessage));
        /* A non-zero label means the handler refused the restart. */
        return (label == 0);

    case seL4_Fault_UserException:
        copyMRsFaultReply(sender, receiver, MessageID_Exception,
                          MIN(length, n_exceptionMessage));
        return (label == 0);

#ifdef CONFIG_HARDWARE_DEBUG_API
    case seL4_Fault_DebugException: {
        word_t n_instrs;

        if (seL4_Fault_DebugException_get_exceptionReason(fault) != seL4_SingleStep) {
            /* Only single-step replies are required to set message registers.
             */
            return (label == 0);
        }

        if (length < DEBUG_REPLY_N_EXPECTED_REGISTERS) {
            /* A single-step reply doesn't mean much if it isn't composed of the bp
             * number and number of instructions to skip. But even if both aren't
             * set, we can still allow the thread to continue because replying
             * should uniformly resume thread execution, based on the general seL4
             * API model.
             *
             * If it was single-step, but no reply registers were set, just
             * default to skipping 1 and continuing.
             *
             * On x86, bp_num actually doesn't matter for single-stepping
             * because single-stepping doesn't use a hardware register -- it
             * uses EFLAGS.TF.
             */
            n_instrs = 1;
        } else {
            /* If the reply had all expected registers set, proceed as normal */
            n_instrs = getRegister(sender, msgRegisters[0]);
        }

        syscall_error_t res;

        /* Validate before committing the single-step configuration. */
        res = Arch_decodeConfigureSingleStepping(receiver, 0, n_instrs, true);
        if (res.type != seL4_NoError) {
            return false;
        };

        configureSingleStepping(receiver, 0, n_instrs, true);

        /* Replying will always resume the thread: the only variant behaviour
         * is whether or not the thread will be resumed with stepping still
         * enabled.
         */
        return (label == 0);
    }
#endif

    default:
        /* Architecture-specific fault types are delegated. */
        return Arch_handleFaultReply(receiver, sender,
                                     seL4_Fault_get_seL4_FaultType(fault));
    }
}
/*
 * Interpret a reply message sent by a fault handler (sender) to a
 * faulting thread (receiver) and decide whether the receiver may be
 * restarted. For unknown-syscall and user-exception faults the reply's
 * message registers are copied back (sanitised) into the receiver.
 *
 * @return true to restart the receiver, false otherwise.
 */
bool_t
handleFaultReply(tcb_t *receiver, tcb_t *sender)
{
    seL4_MessageInfo_t tag;
    word_t label;
    fault_t fault;
    word_t length;

    /* These lookups are moved inward from doReplyTransfer */
    tag = messageInfoFromWord(getRegister(sender, msgInfoRegister));
    label = seL4_MessageInfo_get_label(tag);
    length = seL4_MessageInfo_get_length(tag);
    fault = receiver->tcbFault;

    switch (fault_get_faultType(fault)) {
    case fault_cap_fault:
        return true;

    case fault_vm_fault:
        return true;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    case fault_vgic_maintenance:
        return true;
    case fault_vcpu_fault:
        return true;
#endif

    case fault_unknown_syscall: {
        word_t i;
        register_t r;
        word_t v;
        word_t *sendBuf;

        sendBuf = lookupIPCBuffer(false, sender);

        /* Assumes n_syscallMessage > n_msgRegisters */
        /* First copy the MRs held in physical registers... */
        for (i = 0; i < length && i < n_msgRegisters; i++) {
            r = syscallMessage[i];
            v = getRegister(sender, msgRegisters[i]);
            setRegister(receiver, r, sanitiseRegister(r, v));
        }

        /* ...then the remainder from the sender's IPC buffer, if mapped. */
        if (sendBuf) {
            for (; i < length && i < n_syscallMessage; i++) {
                r = syscallMessage[i];
                v = sendBuf[i + 1];
                setRegister(receiver, r, sanitiseRegister(r, v));
            }
        }
    }
    /* A non-zero label means the handler refused the restart. */
    return (label == 0);

    case fault_user_exception: {
        word_t i;
        register_t r;
        word_t v;

        /* Assumes n_exceptionMessage <= n_msgRegisters */
        for (i = 0; i < length && i < n_exceptionMessage; i++) {
            r = exceptionMessage[i];
            v = getRegister(sender, msgRegisters[i]);
            setRegister(receiver, r, sanitiseRegister(r, v));
        }
    }
    return (label == 0);

    default:
        fail("Invalid fault");
    }
}
/*
 * Decode and perform a capability invocation on behalf of the current
 * thread.
 *
 * @param isCall     True when invoked via seL4_Call (a kernel reply is
 *                   generated on error/empty success).
 * @param isBlocking True when the thread is willing to block; cap faults
 *                   are only delivered in that case.
 * @return EXCEPTION_NONE in all cases except preemption, where
 *         EXCEPTION_PREEMPTED is propagated.
 */
static exception_t
handleInvocation(bool_t isCall, bool_t isBlocking)
{
    seL4_MessageInfo_t info;
    cptr_t cptr;
    lookupCapAndSlot_ret_t lu_ret;
    word_t *buffer;
    exception_t status;
    word_t length;
    tcb_t *thread;

    thread = ksCurThread;

    info = messageInfoFromWord(getRegister(thread, msgInfoRegister));
    cptr = getRegister(thread, capRegister);

    /* faulting section */
    lu_ret = lookupCapAndSlot(thread, cptr);

#ifdef DEBUG
    ksKernelEntry.cap_type = cap_get_capType(lu_ret.cap);
    ksKernelEntry.invocation_tag = seL4_MessageInfo_get_label(info);
#endif /* DEBUG */

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invocation of invalid cap #%lu.", cptr);
        current_fault = fault_cap_fault_new(cptr, false);

        /* Only deliver the cap fault if the caller was willing to block. */
        if (isBlocking) {
            handleFault(thread);
        }

        return EXCEPTION_NONE;
    }

    buffer = lookupIPCBuffer(false, thread);

    status = lookupExtraCaps(thread, buffer, info);

    if (unlikely(status != EXCEPTION_NONE)) {
        userError("Lookup of extra caps failed.");
        if (isBlocking) {
            handleFault(thread);
        }
        return EXCEPTION_NONE;
    }

    /* Syscall error/Preemptible section */
    length = seL4_MessageInfo_get_length(info);
    /* Without an IPC buffer, only the register-backed MRs exist. */
    if (unlikely(length > n_msgRegisters && !buffer)) {
        length = n_msgRegisters;
    }
    status = decodeInvocation(seL4_MessageInfo_get_label(info), length,
                              cptr, lu_ret.slot, lu_ret.cap,
                              current_extra_caps, isBlocking, isCall,
                              buffer);

    if (unlikely(status == EXCEPTION_PREEMPTED)) {
        return status;
    }

    if (unlikely(status == EXCEPTION_SYSCALL_ERROR)) {
        if (isCall) {
            replyFromKernel_error(thread);
        }
        return EXCEPTION_NONE;
    }

    if (unlikely(
                thread_state_get_tsType(thread->tcbState) == ThreadState_Restart)) {
        /* The invocation did not reply itself; complete the protocol
         * with an empty success reply and resume the thread. */
        if (isCall) {
            replyFromKernel_success_empty(thread);
        }
        setThreadState(thread, ThreadState_Running);
    }

    return EXCEPTION_NONE;
}
/**
 * Dispatch a fault/exception IPC received on a VM's fault endpoint.
 *
 * Handles page faults, unknown-syscall IPC, user exceptions, VGIC
 * maintenance interrupts and VCPU faults. For the cases that are resumed
 * here, an empty reply is sent to restart the faulting VCPU thread.
 *
 * @param vm  The VM the fault message belongs to. (No ownership transfer)
 * @param tag The message info of the received fault IPC.
 * @return 0 if the event was handled, -1 otherwise.
 */
int vm_event(vm_t *vm, seL4_MessageInfo_t tag)
{
    seL4_Word label;
    seL4_Word length;

    label = seL4_MessageInfo_get_label(tag);
    length = seL4_MessageInfo_get_length(tag);

    switch (label) {
    case SEL4_PFIPC_LABEL: {
        /* VM page fault: keep processing until the fault is fully handled
         * (a single fault may take several passes). The reply is presumably
         * sent by the fault-handling machinery itself — no seL4_Reply here. */
        int err;
        fault_t *fault;
        fault = vm->fault;
        err = new_fault(fault);
        assert(!err);
        do {
            err = handle_page_fault(vm, fault);
            if (err) {
                return -1;
            }
        } while (!fault_handled(fault));
        break;
    }

    case SEL4_EXCEPT_IPC_LABEL: {
        int err;
        assert(length == SEL4_EXCEPT_IPC_LENGTH);
        err = handle_syscall(vm, length);
        assert(!err);
        if (!err) {
            seL4_MessageInfo_t reply;
            reply = seL4_MessageInfo_new(0, 0, 0, 0);
            seL4_Reply(reply);
        }
        break;
    }

    case SEL4_USER_EXCEPTION_LABEL: {
        seL4_Word ip;
        int err;
        assert(length == SEL4_USER_EXCEPTION_LENGTH);
        ip = seL4_GetMR(0);
        err = handle_exception(vm, ip);
        assert(!err);
        if (!err) {
            seL4_MessageInfo_t reply;
            reply = seL4_MessageInfo_new(0, 0, 0, 0);
            seL4_Reply(reply);
        }
        break;
    }

    case SEL4_VGIC_MAINTENANCE_LABEL: {
        int idx;
        int err;
        assert(length == SEL4_VGIC_MAINTENANCE_LENGTH);
        idx = seL4_GetMR(EXCEPT_IPC_SYS_MR_R0);
        /* Currently not handling spurious IRQs */
        assert(idx >= 0);
        err = handle_vgic_maintenance(vm, idx);
        assert(!err);
        if (!err) {
            seL4_MessageInfo_t reply;
            reply = seL4_MessageInfo_new(0, 0, 0, 0);
            seL4_Reply(reply);
        }
        break;
    }

    case SEL4_VCPU_FAULT_LABEL: {
        seL4_MessageInfo_t reply;
        seL4_UserContext regs;
        seL4_CPtr tcb;
        uint32_t hsr;
        int err;
        assert(length == SEL4_VCPU_FAULT_LENGTH);
        hsr = seL4_GetMR(EXCEPT_IPC_SYS_MR_R0);
        /* Increment the PC and ignore the fault */
        tcb = vm_get_tcb(vm);
        err = seL4_TCB_ReadRegisters(tcb, false, 0,
                                     sizeof(regs) / sizeof(regs.pc), &regs);
        assert(!err);
        switch (hsr) {
        case HSR_WFI:
        case HSR_WFE:
            /* Skip the trapped instruction: 2 bytes in Thumb state
             * (CPSR T bit, bit 5), 4 bytes in ARM state. */
            regs.pc += (regs.cpsr & BIT(5)) ? 2 : 4;
            err = seL4_TCB_WriteRegisters(tcb, false, 0,
                                          sizeof(regs) / sizeof(regs.pc),
                                          &regs);
            assert(!err);
            reply = seL4_MessageInfo_new(0, 0, 0, 0);
            seL4_Reply(reply);
            return 0;
        default:
            printf("Unhandled VCPU fault from [%s]: HSR 0x%08x\n",
                   vm->name, hsr);
            print_ctx_regs(&regs);
            return -1;
        }
        break;
    }

    default:
        /* What? Why are we here? What just happened? */
        /* seL4_Word does not fit "%x" on LP64 targets: cast explicitly. */
        printf("Unknown fault from [%s]: label=0x%lx length=0x%lx\n",
               vm->name, (unsigned long)label, (unsigned long)length);
        return -1;
    }

    return 0;
}
/* Run a single test. * Each test is launched as its own process. */ int run_test(struct testcase *test) { UNUSED int error; sel4utils_process_t test_process; /* Test intro banner. */ printf(" %s\n", test->name); error = sel4utils_configure_process(&test_process, &env.vka, &env.vspace, env.init->priority, TESTS_APP); assert(error == 0); /* set up caps about the process */ env.init->page_directory = copy_cap_to_process(&test_process, test_process.pd.cptr); env.init->root_cnode = SEL4UTILS_CNODE_SLOT; env.init->tcb = copy_cap_to_process(&test_process, test_process.thread.tcb.cptr); env.init->domain = copy_cap_to_process(&test_process, simple_get_init_cap(&env.simple, seL4_CapDomain)); #ifndef CONFIG_KERNEL_STABLE env.init->asid_pool = copy_cap_to_process(&test_process, simple_get_init_cap(&env.simple, seL4_CapInitThreadASIDPool)); #endif /* CONFIG_KERNEL_STABLE */ #ifdef CONFIG_IOMMU env.init->io_space = copy_cap_to_process(&test_process, simple_get_init_cap(&env.simple, seL4_CapIOSpace)); #endif /* CONFIG_IOMMU */ /* setup data about untypeds */ env.init->untypeds = copy_untypeds_to_process(&test_process, untypeds, num_untypeds); copy_timer_caps(env.init, &env, &test_process); /* copy the fault endpoint - we wait on the endpoint for a message * or a fault to see when the test finishes */ seL4_CPtr endpoint = copy_cap_to_process(&test_process, test_process.fault_endpoint.cptr); /* WARNING: DO NOT COPY MORE CAPS TO THE PROCESS BEYOND THIS POINT, * AS THE SLOTS WILL BE CONSIDERED FREE AND OVERRIDDEN BY THE TEST PROCESS. 
*/ /* set up free slot range */ env.init->cspace_size_bits = CONFIG_SEL4UTILS_CSPACE_SIZE_BITS; env.init->free_slots.start = endpoint + 1; env.init->free_slots.end = (1u << CONFIG_SEL4UTILS_CSPACE_SIZE_BITS); assert(env.init->free_slots.start < env.init->free_slots.end); /* copy test name */ strncpy(env.init->name, test->name + strlen("TEST_"), TEST_NAME_MAX); #ifdef SEL4_DEBUG_KERNEL seL4_DebugNameThread(test_process.thread.tcb.cptr, env.init->name); #endif /* set up args for the test process */ char endpoint_string[10]; char sel4test_name[] = { TESTS_APP }; char zero_string[] = {"0"}; char *argv[] = {sel4test_name, zero_string, endpoint_string}; argv[0] = endpoint_string; snprintf(endpoint_string, 10, "%d", endpoint); /* spawn the process */ error = sel4utils_spawn_process_v(&test_process, &env.vka, &env.vspace, ARRAY_SIZE(argv), argv, 1); assert(error == 0); /* send env.init_data to the new process */ void *remote_vaddr = send_init_data(&env, test_process.fault_endpoint.cptr, &test_process); /* wait on it to finish or fault, report result */ seL4_Word badge; seL4_MessageInfo_t info = seL4_Wait(test_process.fault_endpoint.cptr, &badge); int result = seL4_GetMR(0); if (seL4_MessageInfo_get_label(info) != seL4_NoFault) { sel4utils_print_fault_message(info, test->name); result = FAILURE; } /* unmap the env.init data frame */ vspace_unmap_pages(&test_process.vspace, remote_vaddr, 1, PAGE_BITS_4K, NULL); /* reset all the untypeds for the next test */ for (int i = 0; i < num_untypeds; i++) { cspacepath_t path; vka_cspace_make_path(&env.vka, untypeds[i].cptr, &path); vka_cnode_revoke(&path); } /* destroy the process */ sel4utils_destroy_process(&test_process, &env.vka); test_assert(result == SUCCESS); return result; }
/*
 * Spawn the serial server as a thread sharing the parent's VSpace and
 * CSpace, then block until the server reports (via a seL4_Call on a badged
 * endpoint) whether it successfully bound to the platform serial driver.
 *
 * Resources acquired on the way (badge value, badged endpoint cap, receive
 * slots, thread) are released via the `out:` path on any failure.
 *
 * @param parent_simple  The parent's simple interface. (No ownership transfer)
 * @param parent_vka     Allocator used for all endpoint/slot/thread
 *                       allocations. (No ownership transfer)
 * @param parent_vspace  VSpace the server thread will run in. (No ownership
 *                       transfer)
 * @param priority       Priority for the server thread.
 * @return 0 on success, or a seL4_Error / vka error code on failure.
 */
seL4_Error serial_server_parent_spawn_thread(simple_t *parent_simple,
                                             vka_t *parent_vka,
                                             vspace_t *parent_vspace,
                                             uint8_t priority)
{
    const size_t shmem_max_size = SERIAL_SERVER_SHMEM_MAX_SIZE;
    seL4_Error error;
    size_t shmem_max_n_pages;
    cspacepath_t parent_cspace_cspath;
    seL4_MessageInfo_t tag;

    if (parent_simple == NULL || parent_vka == NULL || parent_vspace == NULL) {
        return seL4_InvalidArgument;
    }

    memset(get_serial_server(), 0, sizeof(serial_server_context_t));

    /* Get a CPtr to the parent's root cnode. */
    shmem_max_n_pages = BYTES_TO_4K_PAGES(shmem_max_size);
    vka_cspace_make_path(parent_vka, 0, &parent_cspace_cspath);

    get_serial_server()->server_vka = parent_vka;
    get_serial_server()->server_vspace = parent_vspace;
    get_serial_server()->server_cspace = parent_cspace_cspath.root;
    get_serial_server()->server_simple = parent_simple;

    /* Allocate the Endpoint that the server will be listening on. */
    error = vka_alloc_endpoint(parent_vka, &get_serial_server()->server_ep_obj);
    if (error != 0) {
        ZF_LOGE(SERSERVP"spawn_thread: failed to alloc endpoint, err=%d.",
                error);
        return error;
    }

    /* And also allocate a badged copy of the Server's endpoint that the Parent
     * can use to send to the Server. This is used to allow the Server to report
     * back to the Parent on whether or not the Server successfully bound to a
     * platform serial driver.
     *
     * This badged endpoint will be reused by the library as the Parent's badged
     * Endpoint cap, if the Parent itself ever chooses to connect() to the
     * Server later on.
     */
    get_serial_server()->parent_badge_value = serial_server_badge_value_alloc();
    if (get_serial_server()->parent_badge_value == SERIAL_SERVER_BADGE_VALUE_EMPTY) {
        error = seL4_NotEnoughMemory;
        goto out;
    }

    error = vka_mint_object(parent_vka, &get_serial_server()->server_ep_obj,
                            &get_serial_server()->_badged_server_ep_cspath,
                            seL4_AllRights,
                            seL4_CapData_Badge_new(get_serial_server()->parent_badge_value));
    if (error != 0) {
        ZF_LOGE(SERSERVP"spawn_thread: Failed to mint badged Endpoint cap to "
                "server.\n"
                "\tParent cannot confirm Server thread successfully spawned.");
        goto out;
    }

    /* Allocate enough Cnode slots in our CSpace to enable us to receive
     * frame caps from our clients, sufficient to cover "shmem_max_size".
     * The problem here is that we're sort of forced to assume that we get
     * these slots contiguously. If they're not, we have a problem.
     *
     * If a client tries to send us too many frames, we respond with an error,
     * and indicate our shmem_max_size in the SSMSGREG_RESPONSE
     * message register.
     */
    get_serial_server()->frame_cap_recv_cspaths = calloc(shmem_max_n_pages,
                                                         sizeof(cspacepath_t));
    if (get_serial_server()->frame_cap_recv_cspaths == NULL) {
        error = seL4_NotEnoughMemory;
        goto out;
    }

    for (size_t i = 0; i < shmem_max_n_pages; i++) {
        error = vka_cspace_alloc_path(parent_vka,
                                      &get_serial_server()->frame_cap_recv_cspaths[i]);
        if (error != 0) {
            ZF_LOGE(SERSERVP"spawn_thread: Failed to alloc enough cnode slots "
                    "to receive shmem frame caps equal to %d bytes.",
                    shmem_max_size);
            goto out;
        }
    }

    /* The server thread runs in the parent's VSpace and CSpace, listening
     * on the endpoint allocated above. */
    error = sel4utils_configure_thread(parent_vka, parent_vspace, parent_vspace,
                                       get_serial_server()->server_ep_obj.cptr,
                                       priority,
                                       parent_cspace_cspath.root, seL4_NilData,
                                       &get_serial_server()->server_thread);
    if (error != 0) {
        ZF_LOGE(SERSERVP"spawn_thread: sel4utils_configure_thread failed "
                "with %d.", error);
        goto out;
    }

    error = sel4utils_start_thread(&get_serial_server()->server_thread,
                                   &serial_server_main,
                                   NULL, NULL, 1);
    if (error != 0) {
        ZF_LOGE(SERSERVP"spawn_thread: sel4utils_start_thread failed with "
                "%d.", error);
        goto out;
    }

    /* When the Server is spawned, it will reply to tell us whether or not it
     * successfully bound itself to the platform serial device. Block here
     * and wait for that reply.
     */
    seL4_SetMR(SSMSGREG_FUNC, FUNC_SERVER_SPAWN_SYNC_REQ);
    tag = seL4_MessageInfo_new(0, 0, 0, SSMSGREG_SPAWN_SYNC_REQ_END);
    tag = seL4_Call(get_serial_server()->_badged_server_ep_cspath.capPtr, tag);

    /* Did all go well with the server?
     */
    if (seL4_GetMR(SSMSGREG_FUNC) != FUNC_SERVER_SPAWN_SYNC_ACK) {
        ZF_LOGE(SERSERVP"spawn_thread: Server thread sync message after spawn "
                "was not a SYNC_ACK as expected.");
        error = seL4_InvalidArgument;
        goto out;
    }
    /* The reply label carries the server's bind status: non-zero means it
     * failed to bind to the platform serial device. */
    error = seL4_MessageInfo_get_label(tag);
    if (error != 0) {
        ZF_LOGE(SERSERVP"spawn_thread: Server thread failed to bind to the "
                "platform serial device.");
        goto out;
    }

    get_serial_server()->shmem_max_size = shmem_max_size;
    get_serial_server()->shmem_max_n_pages = shmem_max_n_pages;
    return 0;

out:
    if (get_serial_server()->frame_cap_recv_cspaths != NULL) {
        for (size_t i = 0; i < shmem_max_n_pages; i++) {
            /* Since the array was allocated with calloc(), it was zero'd out. So
             * those indexes that didn't get allocated will have NULL in them.
             * Break early on the first index that has NULL. */
            if (get_serial_server()->frame_cap_recv_cspaths[i].capPtr == 0) {
                break;
            }
            vka_cspace_free_path(parent_vka,
                                 get_serial_server()->frame_cap_recv_cspaths[i]);
        }
    }
    free(get_serial_server()->frame_cap_recv_cspaths);

    if (get_serial_server()->_badged_server_ep_cspath.capPtr != 0) {
        vka_cspace_free_path(parent_vka,
                             get_serial_server()->_badged_server_ep_cspath);
    }
    if (get_serial_server()->parent_badge_value != SERIAL_SERVER_BADGE_VALUE_EMPTY) {
        serial_server_badge_value_free(get_serial_server()->parent_badge_value);
    }

    vka_free_object(parent_vka, &get_serial_server()->server_ep_obj);
    return error;
}