Example no. 1
context_t act_init(context_t own_context, init_info_t* info, size_t init_base, size_t init_entry) {
	KERNEL_TRACE("init", "activation init");

	internel_if.message_send = kernel_seal(act_send_message_get_trampoline(), act_ref_type);
	internel_if.message_reply = kernel_seal(act_send_return_get_trampoline(), act_sync_ref_type);
	setup_syscall_interface(&internel_if);

	kernel_next_act = 0;

	// This is a dummy. Our first context has already been created
	reg_frame_t frame;
	bzero(&frame, sizeof(struct reg_frame));

	// Register the kernel (exception) activation
	act_t * kernel_act = &kernel_acts[0];
	act_register(&frame, &kernel_queue.queue, "kernel", status_terminated, NULL, cheri_getbase(cheri_getpcc()));
	/* The kernel context already exists and we set it here */
	kernel_act->context = own_context;

	// Create and register the init activation
	KERNEL_TRACE("act", "Retroactively creating init activation");

	/* Not a dummy here. We will subset our own c0/pcc for init. init is loaded directly after the kernel */
	bzero(&frame, sizeof(struct reg_frame));
	size_t length = cheri_getlen(cheri_getdefault()) - init_base;

	frame.cf_c0 = cheri_setbounds(cheri_setoffset(cheri_getdefault(), init_base), length);
	capability pcc = cheri_setbounds(cheri_setoffset(cheri_getpcc(), init_base), length);

	KERNEL_TRACE("act", "assuming init has virtual entry point %lx", init_entry);
	frame.cf_c12 = frame.cf_pcc = cheri_setoffset(pcc, init_entry);

	/* Provide config info to init. c3 is the conventional argument register. */
	frame.cf_c3 = info;

	act_t * init_act = &kernel_acts[namespace_num_boot];
	act_register_create(&frame, &init_queue.queue, "init", status_alive, NULL);

	/* The boot activation should be the current activation */
	sched_schedule(init_act);

	return init_act->context;
}
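
Both the init c0 and pcc above are produced by the same derive-then-narrow pattern: move the offset of a parent capability to the start of the target region, then shrink the bounds to the region's length. A minimal sketch of that pattern as a standalone helper (the helper name derive_subset is hypothetical; capability, cheri_setoffset and cheri_setbounds are used exactly as in the kernel code above):

/* Hypothetical helper, not part of the kernel source: derive a sub-capability
 * covering [region_base, region_base + region_length) from a parent capability,
 * as act_init does when carving init's c0 and pcc out of the kernel's own. */
static inline capability derive_subset(capability parent, size_t region_base, size_t region_length) {
	return cheri_setbounds(cheri_setoffset(parent, region_base), region_length);
}

With such a helper, the two derivations above would read frame.cf_c0 = derive_subset(cheri_getdefault(), init_base, length) and pcc = derive_subset(cheri_getpcc(), init_base, length).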
Example no. 2
void bootloader_main(void) {

	/* Init hardware */
	hw_init();

	/* Initialize elf-loader environment */
	init_elf_loader();

    /* Load the nano kernel. Doing this will install exception vectors */
    boot_printf("Boot: loading nano kernel ...\n");
    nano_init_t *nano_init = (nano_init_t *)load_nano(); // We have to rederive this as an executable cap
    nano_init = (nano_init_t *)cheri_setoffset(cheri_getpcc(), cheri_getoffset(nano_init));

    /* TODO: we could have some boot exception vectors if we want exception handling in boot. */
    /* These should be in ROM as a part of the boot image (i.e. make a couple more dedicated sections). */
    cp0_status_bev_set(0);

    boot_printf("Boot: loading kernel ...\n");
    size_t entry = load_kernel();

    boot_printf("Boot: loading init ...\n");
    boot_info_t *bi = load_init();

    size_t invalid_length = bi->init_end;
    capability phy_start = cheri_setbounds(cheri_setoffset(cheri_getdefault(), MIPS_KSEG0), invalid_length);

    /* Do we actually need this? */
    //boot_printf("Invalidating %p length %lx:\n", phy_start, invalid_length);
    //caches_invalidate(phy_start, invalid_length);


    register_t mem_size = bi->init_end - bi->nano_end;

    /* Jumps to the nano kernel init. This will completely destroy boot, so we can never return here.
     * All registers will be cleared apart from a specified few. mem_size bytes of memory will be left
     * unmanaged and the rest will be returned as a reservation. The third argument is an extra argument
     * passed through to the kernel. */

    boot_printf("Jumping to nano kernel...\n");
    BOOT_PRINT_CAP(nano_init);
    nano_init(mem_size, entry, bi->init_begin - bi->kernel_begin, bi->init_entry);
}
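
The cast-and-rederive step for nano_init above illustrates a recurring idiom: a code address obtained through a data capability cannot be jumped to directly, so its offset is transplanted onto $pcc to regain an executable capability. A sketch of that idiom in isolation (the helper name rederive_executable is illustrative and not part of the boot code; only cheri_getpcc, cheri_getoffset and cheri_setoffset from the code above are assumed):

/* Illustrative only: rebuild an executable capability for a code address that
 * was obtained via a data capability, by replaying its offset on top of $pcc. */
static inline capability rederive_executable(capability data_derived) {
	return cheri_setoffset(cheri_getpcc(), cheri_getoffset(data_derived));
}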
Example no. 3
/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(int bucket)
{
	char *buf;
	union overhead *op;
	size_t sz;			/* size of desired block */
	int amt;			/* amount to allocate */
	int nblks;			/* how many blocks we get */

	/*
	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
	 * 2^30 bytes on a VAX, I think) or for a negative arg.
	 */
	sz = 1 << (bucket + 3);
#ifdef MALLOC_DEBUG
	ASSERT(sz > 0);
#else
	if (sz <= 0)
		return;
#endif
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz + pagesz;
		nblks = 1;
	}
	if (amt > pagepool_end - pagepool_start)
		if (__morepages(amt/pagesz) == 0)
			return;

	/*
	 * XXXRW: For now, depend on a global $c0 -- but shouldn't need to as
	 * we could be deriving from heap.
	 */
	buf = cheri_setoffset(cheri_getdefault(), pagepool_start);
	buf = cheri_setbounds(buf, amt);
	pagepool_start += amt;

	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op = (union overhead *)cheri_setbounds(buf, sz);
	while (--nblks > 0) {
		op->ov_next = (union overhead *)cheri_setbounds(buf + sz, sz);
		buf += sz;
		op = op->ov_next;
	}
}
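
The bucket arithmetic above maps bucket i to blocks of 1 << (i + 3) bytes, so bucket 0 holds 8-byte blocks, bucket 1 holds 16-byte blocks, and so on; for blocks smaller than a page a whole page is split into nblks blocks, otherwise a single block plus one page of slack is requested. A small standalone illustration of that relationship (pagesz is assumed to be 4096 here purely for the example; this program is not part of the allocator):

#include <stdio.h>

int main(void) {
	const size_t pagesz = 4096;				/* assumed page size for the example */
	for (int bucket = 0; bucket < 12; bucket++) {
		size_t sz = (size_t)1 << (bucket + 3);		/* block size served by this bucket */
		size_t nblks = sz < pagesz ? pagesz / sz : 1;	/* blocks obtained per refill */
		printf("bucket %2d: block size %6zu bytes, %4zu block(s) per refill\n",
		    bucket, sz, nblks);
	}
	return 0;
}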
Example no. 4
/*
 * Allocate a new cheri_fd object for an already-open file descriptor.
 */
int
cheri_fd_new(int fd, struct cheri_object *cop)
{
	__capability void *codecap, *datacap;
	struct cheri_fd *cfp;

	cfp = calloc(1, sizeof(*cfp));
	if (cfp == NULL) {
		errno = ENOMEM;
		return (-1);
	}
	CHERI_SYSTEM_OBJECT_INIT(cfp, cheri_fd_vtable);
	cfp->cf_fd = fd;

	/*
	 * Construct a sealed code capability for the class.  This is just the
	 * ambient $pcc with the offset set to the entry address.
	 *
	 * XXXRW: For now, when invoked, we install $pcc into $c0, so this
	 * needs a full set of permissions rather than just LOAD/EXECUTE. In
	 * the future, we will want to preserve a copy of cheri_getdefault()
	 * in the struct cheri_fd to be reinstalled by the entry code.
	 *
	 * XXXRW: In the future, use cheri_codeptr() here?
	 */
	codecap = cheri_setoffset(cheri_getpcc(),
	    (register_t)CHERI_CLASS_ENTRY(cheri_fd));
	cop->co_codecap = cheri_seal(codecap, cheri_fd_type);

	/*
	 * Construct a sealed data capability for the class.  This describes
	 * the 'struct cheri_fd' for the specific file descriptor.  The $c0
	 * to reinstall later is the first field in the structure.
	 *
	 * XXXRW: Should we also do an explicit cheri_setoffset()?
	 */
	datacap = cheri_ptrperm(cfp, sizeof(*cfp), CHERI_PERM_GLOBAL |
	    CHERI_PERM_LOAD | CHERI_PERM_LOAD_CAP | CHERI_PERM_STORE |
	    CHERI_PERM_STORE_CAP);
	cop->co_datacap = cheri_seal(datacap, cheri_fd_type);
	return (0);
}
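
A caller would typically wrap an already-open descriptor and then hand the resulting cheri_object to the invocation machinery. A minimal usage sketch under the signature above (wrapping stdout and the error handling are purely illustrative, not taken from the library):

#include <err.h>
#include <unistd.h>

/* Illustrative usage only: wrap the standard output descriptor in a cheri_fd object. */
static struct cheri_object stdout_fd_obj;

static void
example_wrap_stdout(void)
{

	if (cheri_fd_new(STDOUT_FILENO, &stdout_fd_obj) == -1)
		err(1, "cheri_fd_new");
}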