register_t cheritest_libcheri_userfn_getstack(void) { struct cheri_stack cs; struct cheri_stack_frame *csfp; u_int stack_depth; int retval; retval = sysarch(CHERI_GET_STACK, &cs); if (retval != 0) cheritest_failure_err("sysarch(CHERI_GET_STACK) failed"); /* Does stack layout look sensible enough to continue? */ if ((cs.cs_tsize % CHERI_FRAME_SIZE) != 0) cheritest_failure_errx( "stack size (%ld) not a multiple of frame size", cs.cs_tsize); stack_depth = cs.cs_tsize / CHERI_FRAME_SIZE; if ((cs.cs_tsp % CHERI_FRAME_SIZE) != 0) cheritest_failure_errx( "stack pointer (%ld) not a multiple of frame size", cs.cs_tsp); /* Validate that two stack frames are found. */ if (cs.cs_tsp != cs.cs_tsize - (register_t)(2 * CHERI_FRAME_SIZE)) cheritest_failure_errx("stack contains %d frames; expected " "2", (cs.cs_tsize - (2 * CHERI_FRAME_SIZE)) / CHERI_FRAME_SIZE); /* Validate that the first is a saved ambient context. */ csfp = &cs.cs_frames[stack_depth - 1]; if (cheri_getbase(csfp->csf_pcc) != cheri_getbase(cheri_getpcc()) || cheri_getlen(csfp->csf_pcc) != cheri_getlen(cheri_getpcc())) cheritest_failure_errx("frame 0: not global code cap"); /* Validate that the second is cheritest_objectp. */ csfp = &cs.cs_frames[stack_depth - 2]; if ((cheri_getbase(csfp->csf_pcc) != cheri_getbase( sandbox_object_getobject(cheritest_objectp).co_codecap)) || cheri_getlen(csfp->csf_pcc) != cheri_getlen( sandbox_object_getobject(cheritest_objectp).co_codecap)) cheritest_failure_errx("frame 1: not sandbox code cap"); return (0); }
/*
 * Bootstrap the activation subsystem.
 *
 * Registers the already-running kernel context as activation 0 and
 * retroactively creates the init activation from a subset of the
 * kernel's own c0/pcc, then schedules init.
 *
 * own_context: the kernel's existing context (created before this call).
 * info:        config structure handed to init in $c3.
 * init_base:   base offset of the init image inside our address space.
 * init_entry:  init's entry point, relative to init_base.
 *
 * Returns the context of the init activation.
 */
context_t act_init(context_t own_context, init_info_t* info, size_t init_base, size_t init_entry) {
	KERNEL_TRACE("init", "activation init");

	/* Publish the sealed message-send/reply trampolines as the syscall interface. */
	internel_if.message_send = kernel_seal(act_send_message_get_trampoline(), act_ref_type);
	internel_if.message_reply = kernel_seal(act_send_return_get_trampoline(), act_sync_ref_type);
	setup_syscall_interface(&internel_if);

	kernel_next_act = 0;

	// This is a dummy. Our first context has already been created
	reg_frame_t frame;
	bzero(&frame, sizeof(struct reg_frame));

	// Register the kernel (exception) activation
	act_t * kernel_act = &kernel_acts[0];
	act_register(&frame, &kernel_queue.queue, "kernel", status_terminated, NULL, cheri_getbase(cheri_getpcc()));
	/* The kernel context already exists and we set it here */
	kernel_act->context = own_context;

	// Create and register the init activation
	KERNEL_TRACE("act", "Retroactively creating init activation");

	/* Not a dummy here. We will subset our own c0/pcc for init.
	 * init is loaded directly after the kernel. */
	bzero(&frame, sizeof(struct reg_frame));

	/* Bound both the data (c0) and code (pcc) capabilities to [init_base, end). */
	size_t length = cheri_getlen(cheri_getdefault()) - init_base;
	frame.cf_c0 = cheri_setbounds(cheri_setoffset(cheri_getdefault(), init_base), length);
	capability pcc = cheri_setbounds(cheri_setoffset(cheri_getpcc(), init_base), length);

	KERNEL_TRACE("act", "assuming init has virtual entry point %lx", init_entry);
	/* c12 (jump register) and pcc both start at init's entry point. */
	frame.cf_c12 = frame.cf_pcc = cheri_setoffset(pcc, init_entry);

	/* provide config info to init. c3 is the conventional register */
	frame.cf_c3 = info;

	act_t * init_act = &kernel_acts[namespace_num_boot];
	act_register_create(&frame, &init_queue.queue, "init", status_alive, NULL);

	/* The boot activation should be the current activation */
	sched_schedule(init_act);

	return init_act->context;
}
/* * Allocate a new cheri_fd object for an already-open file descriptor. */ int cheri_fd_new(int fd, struct cheri_object *cop) { __capability void *codecap, *datacap; struct cheri_fd *cfp; cfp = calloc(1, sizeof(*cfp)); if (cfp == NULL) { errno = ENOMEM; return (-1); } CHERI_SYSTEM_OBJECT_INIT(cfp, cheri_fd_vtable); cfp->cf_fd = fd; /* * Construct a sealed code capability for the class. This is just the * ambient $pcc with the offset set to the entry address. * * XXXRW: For now, when invoked, we install $pcc into $c0, so this * needs a full set of permissions rather than just LOAD/EXECUTE. In * the future, we will want to preserve a copy of cheri_getdefault() * in the struct cheri_fd to be reinstalled by the entry code. * * XXXRW: In the future, use cheri_codeptr() here? */ codecap = cheri_setoffset(cheri_getpcc(), (register_t)CHERI_CLASS_ENTRY(cheri_fd)); cop->co_codecap = cheri_seal(codecap, cheri_fd_type); /* * Construct a sealed data capability for the class. This describes * the 'struct cheri_fd' for the specific file descriptor. The $c0 * to reinstall later is the first field in the structure. * * XXXRW: Should we also do an explicit cheri_setoffset()? */ datacap = cheri_ptrperm(cfp, sizeof(*cfp), CHERI_PERM_GLOBAL | CHERI_PERM_LOAD | CHERI_PERM_LOAD_CAP | CHERI_PERM_STORE | CHERI_PERM_STORE_CAP); cop->co_datacap = cheri_seal(datacap, cheri_fd_type); return (0); }
void bootloader_main(void) { /* Init hardware */ hw_init(); /* Initialize elf-loader environment */ init_elf_loader(); /* Load the nano kernel. Doing this will install exception vectors */ boot_printf("Boot: loading nano kernel ...\n"); nano_init_t * nano_init = (nano_init_t *)load_nano(); //We have to rederive this as an executable cap nano_init = (nano_init_t*)cheri_setoffset(cheri_getpcc(),cheri_getoffset(nano_init)); /* TODO: we could have some boot exception vectors if we want exception handling in boot. */ /* These should be in ROM as a part of the boot image (i.e. make a couple more dedicated sections */ cp0_status_bev_set(0); boot_printf("Boot: loading kernel ...\n"); size_t entry = load_kernel(); boot_printf("Boot: loading init ...\n"); boot_info_t *bi = load_init(); size_t invalid_length = bi->init_end; capability phy_start = cheri_setbounds(cheri_setoffset(cheri_getdefault(), MIPS_KSEG0), invalid_length); /* Do we actually need this? */ //boot_printf("Invalidating %p length %lx:\n", phy_start, invalid_length); //caches_invalidate(phy_start, invalid_length); register_t mem_size = bi->init_end - bi->nano_end; /* Jumps to the nano kernel init. This will completely destroy boot and so we can never return here. * All registers will be cleared apart from a specified few. mem_size of memory will be left unmanaged and the * rest will be returned as a reservation. The third argument is an extra argument to the kernel */ boot_printf("Jumping to nano kernel...\n"); BOOT_PRINT_CAP(nano_init); nano_init(mem_size, entry, bi->init_begin - bi->kernel_begin, bi->init_entry); }
/*
 * Unwind the trusted stack by the specified number of frames (or all).
 *
 * uap:        signal/user context whose register state is rewritten to
 *             resume at the unwound frame.
 * ret:        value placed in $v0 of the resumed context.
 * op:         CHERI_STACK_UNWIND_OP_N (unwind num_frames frames) or
 *             CHERI_STACK_UNWIND_OP_ALL (unwind everything).
 * num_frames: frame count for OP_N; ignored for OP_ALL.
 *
 * Returns 0 on success, -1 with errno set (EINVAL for a bad op, ERANGE
 * for an inconsistent or too-shallow trusted stack).
 */
int
cheri_stack_unwind(ucontext_t *uap, register_t ret, u_int op,
    u_int num_frames)
{
	struct cheri_frame *cfp;
	struct cheri_stack cs;
	struct cheri_stack_frame *csfp;
	u_int stack_size, stack_frames;
	register_t saved_mcreg0;

	if (op != CHERI_STACK_UNWIND_OP_N &&
	    op != CHERI_STACK_UNWIND_OP_ALL) {
		errno = EINVAL;
		return (-1);
	}

	/*
	 * Request to unwind zero frames is a no-op: no state transformation
	 * is needed.
	 */
	if ((op == CHERI_STACK_UNWIND_OP_N) && (num_frames == 0))
		return (0);

	/*
	 * Retrieve trusted stack and validate before attempting to unwind.
	 */
	if (sysarch(CHERI_GET_STACK, &cs) != 0)
		return (-1);
	if ((cs.cs_tsize % CHERI_FRAME_SIZE) != 0 ||
	    (cs.cs_tsp > cs.cs_tsize) ||
	    (cs.cs_tsp % CHERI_FRAME_SIZE) != 0) {
		errno = ERANGE;
		return (-1);
	}

	/*
	 * See if there is room on the stack for that much unwinding.
	 *
	 * num_frames is unsigned, so only the upper bound needs checking
	 * (the previous "num_frames < 0" comparison was always false).
	 */
	stack_size = cs.cs_tsize / CHERI_FRAME_SIZE;
	stack_frames = (cs.cs_tsize - cs.cs_tsp) / CHERI_FRAME_SIZE;
	if (op == CHERI_STACK_UNWIND_OP_ALL)
		num_frames = stack_frames;
	if (stack_frames < num_frames) {
		errno = ERANGE;
		return (-1);
	}

	/*
	 * Restore state from the last frame being unwound.
	 */
	csfp = &cs.cs_frames[stack_size - (stack_frames - num_frames) - 1];
#if 0
	/* Make sure we will be returning to ambient authority. */
	if (cheri_getbase(csfp->csf_pcc) != cheri_getbase(cheri_getpcc()) ||
	    cheri_getlen(csfp->csf_pcc) != cheri_getlen(cheri_getpcc()))
		return (-1);
#endif

	/*
	 * Pop stack desired number of frames.
	 */
	cs.cs_tsp += num_frames * CHERI_FRAME_SIZE;
	assert(cs.cs_tsp <= cs.cs_tsize);

#ifdef __CHERI_PURE_CAPABILITY__
	cfp = &uap->uc_mcontext.mc_cheriframe;
#else
	cfp = (struct cheri_frame *)uap->uc_mcontext.mc_cp2state;
	if (cfp == NULL ||
	    uap->uc_mcontext.mc_cp2state_len != sizeof(*cfp)) {
		errno = ERANGE;
		return (-1);
	}
#endif

	/*
	 * Zero the capability register file, explicitly restoring $pcc and
	 * $idc from the last trusted-stack frame.
	 */
	memset(cfp, 0, sizeof(*cfp));
	cfp->cf_idc = csfp->csf_idc;
	cfp->cf_pcc = csfp->csf_pcc;

	/*
	 * Zero the general-purpose register file.  Restore not only $pc,
	 * but also the slot for $zero, which will hold a magic number
	 * across sigcode and sigreturn().  Also set a return value.
	 *
	 * XXXRW: The kernel unwinder sets V1 to the signal number?
	 */
	saved_mcreg0 = uap->uc_mcontext.mc_regs[0];
	memset(uap->uc_mcontext.mc_regs, 0,
	    sizeof(uap->uc_mcontext.mc_regs));
	uap->uc_mcontext.mc_regs[0] = saved_mcreg0;
	uap->uc_mcontext.mc_pc = cheri_getoffset(cfp->cf_pcc);
	uap->uc_mcontext.mc_regs[V0] = ret;

	/*
	 * Update kernel view of trusted stack.
	 */
	if (sysarch(CHERI_SET_STACK, &cs) != 0)
		return (-1);
	return (0);
}