Example #1
int main(int argc, char **argv)
{
    struct capref theram;

    // Get an 8K RAM cap
    errval_t err = ram_alloc(&theram, 13);
    assert(err_is_ok(err));

    for(int i = 0; i < 100; i++) {
        thread_yield();
    }

    struct capability cap;
    err = debug_cap_identify(theram, &cap);
    assert(err_is_ok(err));
    assert(cap.type == ObjType_RAM);
    printf("got RAM at 0x%" PRIxGENPADDR ", size %u\n",
           cap.u.ram.base, cap.u.ram.bits);

    struct capref leftcap, rightcap;

    // XXX: Hopefully allocates two consecutive slots
    err = slot_alloc(&leftcap);
    assert(err_is_ok(err));
    err = slot_alloc(&rightcap);
    assert(err_is_ok(err));

    // Split in half
    err = cap_retype(leftcap, theram, ObjType_RAM, 12);
    assert(err_is_ok(err));

    err = debug_cap_identify(leftcap, &cap);
    assert(err_is_ok(err));
    assert(cap.type == ObjType_RAM);
    printf("left half at 0x%" PRIxGENPADDR ", size %u\n",
           cap.u.ram.base, cap.u.ram.bits);

    err = debug_cap_identify(rightcap, &cap);
    assert(err_is_ok(err));
    assert(cap.type == ObjType_RAM);
    printf("right half at 0x%" PRIxGENPADDR ", size %u\n",
           cap.u.ram.base, cap.u.ram.bits);

    // Delete the parent (8K range)
    printf("deleting parent\n");
    err = cap_delete(theram);
    assert(err_is_ok(err));

    // Delete the left half (4K)
    printf("deleting left half\n");
    err = cap_delete(leftcap);
    assert(err_is_ok(err));

    // Delete the right half (4K)
    printf("deleting right half\n");
    err = cap_delete(rightcap);
    assert(err_is_ok(err));

    return 0;
}
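
Sizes in this API are powers of two: ram_alloc(&theram, 13) yields a 2^13 = 8 KB region, and retyping it with 12 bits produces 2^(13-12) = 2 children, placed in consecutive slots starting at the destination capref. A minimal sketch of the same pattern splitting into four 2 KB children instead (hedged: assumes exactly the API used above, including the reliance on consecutive slot allocation):

// Sketch: retype an 8K RAM cap (13 bits) into four 2K children (11 bits).
// Assumes the same API as Example #1; error handling reduced to asserts.
static void split_in_four(struct capref parent8k)
{
    struct capref child[4];
    for (int i = 0; i < 4; i++) {
        errval_t err = slot_alloc(&child[i]); // XXX: hope for consecutive slots
        assert(err_is_ok(err));
    }
    // 2^(13 - 11) = 4 children, filled from child[0] onwards
    errval_t err = cap_retype(child[0], parent8k, ObjType_RAM, 11);
    assert(err_is_ok(err));
}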
Example #2
void
mem_slot_test() {
  as_mem_slot_t *s = slot_new(4);
  slot_alloc(s, 4);
  int a = slot_alloc(s, 5);   // remember where the 5-slot run starts
  slot_alloc(s, 3);
  slot_free(s, a, 5);         // free the middle run again
  slot_alloc(s, 6);           // allocate once more after the free

  slot_print(s);
  slot_destroy(s);
}
Example #3
/**
 * \brief Page fault handler
 *
 * \param memobj  The memory object
 * \param region  The associated vregion
 * \param offset  Offset into memory object of the page fault
 * \param type    The fault type
 */
static errval_t pagefault(struct memobj *memobj, struct vregion *vregion,
                                 genvaddr_t offset, vm_fault_type_t type)
{
    errval_t err;
    struct memobj_one_frame_lazy *lazy = (struct memobj_one_frame_lazy*)memobj;

    struct vspace *vspace = vregion_get_vspace(vregion);
    struct pmap *pmap     = vspace_get_pmap(vspace);
    genvaddr_t vregion_base  = vregion_get_base_addr(vregion);
    genvaddr_t vregion_off   = vregion_get_offset(vregion);
    vregion_flags_t flags = vregion_get_flags(vregion);

    // XXX: ugly --> need to revoke lazy->frame in order to clean up
    //               all the copies that are created here
    struct capref frame_copy;
    err = slot_alloc(&frame_copy);
    if (err_is_fail(err)) {
        return err;
    }
    err = cap_copy(frame_copy, lazy->frame);
    if (err_is_fail(err)) {
        slot_free(frame_copy); // don't leak the slot we just allocated
        return err;
    }

    err = pmap->f.map(pmap, vregion_base + vregion_off + offset, frame_copy,
                      offset, lazy->chunk_size, flags, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_MAP);
    }

    return SYS_ERR_OK;
}
Example #4
/**
 * \brief Initialise a new LMP channel to accept an incoming binding request
 *
 * \param lc  Storage for channel state
 * \param buflen_words Size of incoming buffer, in words
 * \param endpoint Capability to remote LMP endpoint
 */
errval_t lmp_chan_accept(struct lmp_chan *lc,
                         size_t buflen_words, struct capref endpoint)
{
    errval_t err;

    lmp_chan_init(lc);
    lc->remote_cap = endpoint;

    /* allocate a cap slot for the new endpoint cap */
    err = slot_alloc(&lc->local_cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    /* allocate a local endpoint */
    err = lmp_endpoint_create_in_slot(buflen_words, lc->local_cap,
                                      &lc->endpoint);
    if (err_is_fail(err)) {
        slot_free(lc->local_cap);
        return err_push(err, LIB_ERR_ENDPOINT_CREATE);
    }

    /* mark connected */
    lc->connstate = LMP_CONNECTED;
    return SYS_ERR_OK;
}
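
A hedged accept-side usage sketch: the remote endpoint cap would normally be delivered by the monitor during binding, so it appears here as a parameter, and DEFAULT_LMP_BUF_WORDS is the customary buffer size.

// Sketch: accept an incoming binding on a fresh channel.
static void accept_binding(struct capref remote_ep)
{
    static struct lmp_chan lc;
    errval_t err = lmp_chan_accept(&lc, DEFAULT_LMP_BUF_WORDS, remote_ep);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "lmp_chan_accept");
    }
    // lc.local_cap now holds our endpoint cap; it must be sent back to the
    // peer so it can complete its side of the channel.
}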
Example #5
void
update_owner__rx_handler(struct intermon_binding *b, intermon_caprep_t caprep, genvaddr_t st)
{
    errval_t err;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;
    coreid_t from = inter_st->core_id;
    struct capref capref;
    struct capability cap;
    caprep_to_capability(&caprep, &cap);

    err = slot_alloc(&capref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to allocate slot for owner update");
    }

    err = monitor_copy_if_exists(&cap, capref);
    if (err_is_ok(err)) {
        err = monitor_set_cap_owner(cap_root, get_cap_addr(capref),
                                    get_cap_valid_bits(capref), from);
    }
    if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
        err = SYS_ERR_OK;
    }

    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to update cap ownership");
    }

    cap_destroy(capref);

    err = owner_updated(from, st);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to send ownership update response");
    }
}
Example #6
/**
 * \brief Allocates a new VNode, adding it to the page table and our metadata
 */
errval_t alloc_vnode(struct pmap_x86 *pmap, struct vnode *root,
                     enum objtype type, uint32_t entry,
                     struct vnode **retvnode)
{
    errval_t err;

    struct vnode *newvnode = slab_alloc(&pmap->slab);
    if (newvnode == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    }

    // The VNode capability
    err = pmap->p.slot_alloc->alloc(pmap->p.slot_alloc, &newvnode->u.vnode.cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    err = vnode_create(newvnode->u.vnode.cap, type);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_CREATE);
    }

    // XXX: need to make sure that vnode cap that we will invoke is in our cspace!
    if (get_croot_addr(newvnode->u.vnode.cap) != CPTR_ROOTCN) {
        // debug_printf("%s: creating vnode for another domain in that domain's cspace; need to copy vnode cap to our cspace to make it invokable\n", __FUNCTION__);
        err = slot_alloc(&newvnode->u.vnode.invokable);
        assert(err_is_ok(err));
        err = cap_copy(newvnode->u.vnode.invokable, newvnode->u.vnode.cap);
        assert(err_is_ok(err));
    } else {
        // debug_printf("vnode in our cspace: copying capref to invokable\n");
        newvnode->u.vnode.invokable = newvnode->u.vnode.cap;
    }
    assert(!capref_is_null(newvnode->u.vnode.cap));
    assert(!capref_is_null(newvnode->u.vnode.invokable));

    err = pmap->p.slot_alloc->alloc(pmap->p.slot_alloc, &newvnode->mapping);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    // Map it
    err = vnode_map(root->u.vnode.invokable, newvnode->u.vnode.cap, entry,
                    PTABLE_ACCESS_DEFAULT, 0, 1, newvnode->mapping);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);
    }

    // The VNode meta data
    newvnode->is_vnode  = true;
    newvnode->entry     = entry;
    newvnode->next      = root->u.vnode.children;
    root->u.vnode.children = newvnode;
    newvnode->u.vnode.children = NULL;

    *retvnode = newvnode;
    return SYS_ERR_OK;
}
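
The croot check above encodes a general rule: a capability can only be invoked through the local CSpace, so a cap that lives in another domain's CSpace needs a local copy first. A condensed sketch of just that pattern (hedged; it reuses the helpers from the example above):

// Sketch: ensure `cap` is invokable by making a local copy when needed.
static errval_t make_invokable(struct capref cap, struct capref *invokable)
{
    if (get_croot_addr(cap) == CPTR_ROOTCN) {
        *invokable = cap;               // already in our CSpace
        return SYS_ERR_OK;
    }
    errval_t err = slot_alloc(invokable);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }
    return cap_copy(*invokable, cap);   // local, invokable copy
}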
Example #7
/**
 * \brief Allocate a new receive capability slot for an LMP channel
 *
 * This utility function allocates a new receive slot (using #slot_alloc)
 * and sets it on the channel (using #lmp_chan_set_recv_slot).
 *
 * \param lc LMP channel
 */
errval_t lmp_chan_alloc_recv_slot(struct lmp_chan *lc)
{
    struct capref slot;

    errval_t err = slot_alloc(&slot);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    lmp_chan_set_recv_slot(lc, slot);
    return SYS_ERR_OK;
}
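
The receive slot is consumed whenever a message actually carries a capability, so receive handlers typically re-arm it before re-registering. A hedged sketch (handler name illustrative; the lmp_chan calls follow the same library as above):

// Sketch: re-arm the receive slot from an LMP receive handler.
static void my_recv_handler(void *arg)
{
    struct lmp_chan *lc = arg;
    struct lmp_recv_msg msg = LMP_RECV_MSG_INIT;
    struct capref cap;

    errval_t err = lmp_chan_recv(lc, &msg, &cap);
    if (err_is_ok(err) && !capref_is_null(cap)) {
        // the message delivered a cap into our receive slot: replace it
        err = lmp_chan_alloc_recv_slot(lc);
        assert(err_is_ok(err));
    }
    // re-register for the next message
    err = lmp_chan_register_recv(lc, get_default_waitset(),
                                 MKCLOSURE(my_recv_handler, arg));
    assert(err_is_ok(err));
}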
Example #8
/**
 * \brief Allocate some memory for malloc to use
 *
 * This function will keep retrying with smaller and smaller frames until
 * it finds a set of frames that satisfies the requirement. retbytes can
 * be smaller than bytes if we were only able to allocate a smaller memory
 * region than requested.
 */
static void *morecore_alloc(size_t bytes, size_t *retbytes)
{
    errval_t err;
    struct morecore_state *state = get_morecore_state();

    void *buf = NULL;
    size_t mapped = 0;
    size_t step = bytes;
    while (mapped < bytes) {
        struct capref cap;
        err = slot_alloc(&cap);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "slot_alloc failed");
        }

        void *mid_buf = NULL;
        err = vspace_mmu_aware_map(&state->mmu_state, cap, step,
                                   &mid_buf, &step);
        if (err_is_ok(err)) {
            if (buf == NULL) {
                buf = mid_buf;
            }
            mapped += step;
        } else {
            /*
              vspace_mmu_aware_map failed, probably because we asked
              for a very large frame; try asking for a smaller one.
             */
            if (err_no(err) == LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS) {
                err = slot_free(cap);
                if (err_is_fail(err)) {
                    debug_err(__FILE__, __func__, __LINE__, err,
                              "slot_free failed");
                    return NULL;
                }
                if (step < BASE_PAGE_SIZE) {
                    // Return whatever we have allocated until now
                    break;
                }
                step /= 2;
                continue;
            } else {
                debug_err(__FILE__, __func__, __LINE__, err,
                          "vspace_mmu_aware_map fail");
                return NULL;
            }
        }
    }

    *retbytes = mapped;
    return buf;
}
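
The retry policy is: halve step on every LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS failure, and give up once step would fall below a base page, returning whatever was mapped so far. A freestanding sketch of just that policy (illustrative constants; the 16 KB "success" threshold stands in for a fragmented allocator):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t bytes = 65536, mapped = 0, step = bytes;
    while (mapped < bytes) {
        int ok = (step <= 16384);   // pretend mapping succeeds only at <= 16K
        if (ok) {
            mapped += step;         // keep the chunk, map the rest next
        } else {
            if (step < 4096) {      // stand-in for BASE_PAGE_SIZE
                break;              // give up: return the partial allocation
            }
            step /= 2;              // ask for a smaller frame and retry
        }
    }
    printf("mapped %zu of %zu bytes\n", mapped, bytes); // 65536 of 65536
    return 0;
}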
Example #9
static errval_t reclaim_memory(genpaddr_t base, uint8_t bits)
{
    /* XXX: mem client is only defined for the bsp core.
     * For app cores, just return */
    if (get_mem_client() == NULL) {
        return SYS_ERR_OK;
    }

    // Fabricate new RAM cap and hand back to mem_serv
    struct capability c = {
        .type = ObjType_RAM,
        .u.ram = {
            .base = base,
            .bits = bits,
        }
    };
    struct capref ramcap;
    errval_t err = slot_alloc(&ramcap);
    if(err_is_fail(err)) {
        return err;
    }

    err = monitor_cap_create(ramcap, &c, my_core_id);
    if(err_is_fail(err)) {
        return err;
    }

    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
    errval_t result;
    thread_mutex_lock(&ram_alloc_state->ram_alloc_lock);
    struct mem_rpc_client *b = get_mem_client();
    // XXX: This should not be an RPC! It could stall the monitor, but
    // we trust mem_serv for the moment.
    err = b->vtbl.free_monitor(b, ramcap, base, bits, &result);
    thread_mutex_unlock(&ram_alloc_state->ram_alloc_lock);
    if(err_is_fail(err)) {
        return err;
    }
    if(err_is_fail(result)) {
        return result;
    }

    // XXX: this shouldn't be necessary as free_monitor uses give_away_cap
    err = cap_destroy(ramcap);
    if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
        err = SYS_ERR_OK;
    }
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "destroying reclaimed cap");
    }
    return err;
}
Example #10
static void create_caps(void)
{
    errval_t err;

    // allocate a bunch of ramcaps
    for (int i=0; i<CAPS_PER_CORE; i++) {
        err = ram_alloc(&my_caps[i], CHILD_BITS);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "xcorecap: RAM alloc failed\n");   
            abort(); 
        }
        err = slot_alloc(&retyped_caps[i]);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "slot alloc err\n");   
            abort(); 
        }
    }
    printf("core %i caps created\n", my_coreid);
}
Example #11
/**
 * \brief Allocates a new VNode, adding it to the page table and our metadata
 */
static errval_t alloc_vnode(struct pmap_arm *pmap_arm, struct vnode *root,
                            enum objtype type, uint32_t entry,
                            struct vnode **retvnode)
{
    assert(root->is_vnode);
    errval_t err;

    struct vnode *newvnode = slab_alloc(&pmap_arm->slab);
    if (newvnode == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    }
    newvnode->is_vnode = true;

    // The VNode capability
    err = slot_alloc(&newvnode->u.vnode.cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    err = vnode_create(newvnode->u.vnode.cap, type);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_CREATE);
    }

    err = vnode_map(root->u.vnode.cap, newvnode->u.vnode.cap, entry,
                    KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE, 0, 1);

    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);
    }

    // The VNode meta data
    newvnode->entry            = entry;
    newvnode->next             = root->u.vnode.children;
    root->u.vnode.children     = newvnode;
    newvnode->u.vnode.children = NULL;

    if (retvnode) {
        *retvnode = newvnode;
    }
    return SYS_ERR_OK;
}
Example #12
struct tag_item *
tag_pool_get_item(enum tag_type type, const char *value, size_t length)
{
	struct slot **slot_p, *slot;

	slot_p = &slots[calc_hash_n(type, value, length) % NUM_SLOTS];
	for (slot = *slot_p; slot != NULL; slot = slot->next) {
		if (slot->item.type == type &&
		    length == strlen(slot->item.value) &&
		    memcmp(value, slot->item.value, length) == 0 &&
		    slot->ref < 0xff) {
			assert(slot->ref > 0);
			++slot->ref;
			return &slot->item;
		}
	}

	slot = slot_alloc(*slot_p, type, value, length);
	*slot_p = slot;
	return &slot->item;
}
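
Callers treat the returned items as interned, reference-counted values. A hedged usage sketch (TAG_ARTIST and tag_pool_put_item are assumed from the surrounding codebase; the pool's lock, if any, is elided):

/* Sketch: two lookups for equal values intern to the same slot. */
struct tag_item *a = tag_pool_get_item(TAG_ARTIST, "Miles Davis", 11);
struct tag_item *b = tag_pool_get_item(TAG_ARTIST, "Miles Davis", 11);
assert(a == b);          /* same slot, ref count is now 2 */
tag_pool_put_item(b);    /* drop one reference */
tag_pool_put_item(a);    /* last reference: the slot may be freed */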
Example #13
static void retype_cap(struct xcorecap_binding *b)
{
    errval_t err;
    struct capref vnode_cap;
    
    // retype the ram vnode (should fail if retyped on another core)
    err = slot_alloc(&vnode_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "xcorecapserv: Vnode slot alloc failed\n"); 
    }
    err = cap_retype(vnode_cap, sent_cap, ObjType_VNode_x86_64_ptable,
                     ALLOC_BITS);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "xcorecapserv: Retype to vnode failed\n");    
    } 

    printf("xcorecapserv retyped cap\n");
    fflush(stdout);

    b->tx_vtbl.send_done(b, NOP_CONT);
}
Example #14
struct tag_item *tag_pool_dup_item(struct tag_item *item)
{
	struct slot *slot = tag_item_to_slot(item);

	assert(slot->ref > 0);

	if (slot->ref < 0xff) {
		++slot->ref;
		return item;
	} else {
		/* the reference counter overflows above 0xff;
		   duplicate the item, and start with 1 */
		size_t length = strlen(item->value);
		struct slot **slot_p =
			&slots[calc_hash_n(item->type, item->value,
					   length) % NUM_SLOTS];
		slot = slot_alloc(*slot_p, item->type,
				  item->value, length);
		*slot_p = slot;
		return &slot->item;
	}
}
Example #15
void
find_cap__rx_handler(struct intermon_binding *b, intermon_caprep_t caprep, genvaddr_t st)
{
    errval_t err, cleanup_err;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;
    coreid_t from = inter_st->core_id;
    struct capability cap;
    caprep_to_capability(&caprep, &cap);
    struct capref capref;

    err = slot_alloc(&capref);
    if (err_is_fail(err)) {
        goto send_err;
    }

    err = monitor_copy_if_exists(&cap, capref);
    if (err_is_fail(err)) {
        goto free_slot;
    }

    cleanup_err = cap_delete(capref);
    if (err_is_fail(cleanup_err)) {
        USER_PANIC_ERR(cleanup_err, "failed to delete temporary cap");
    }

free_slot:
    cleanup_err = slot_free(capref);
    if (err_is_fail(cleanup_err)) {
        USER_PANIC_ERR(cleanup_err, "failed to free slot for temporary cap");
    }

send_err:
    cleanup_err = find_cap_result(from, err, st);
    if (err_is_fail(cleanup_err)) {
        USER_PANIC_ERR(cleanup_err, "failed to send find_cap result");
    }
}
Example #16
/**
 * \brief Initialise a new LMP channel and initiate a binding
 *
 * \param lc  Storage for channel state
 * \param cont Continuation for bind completion/failure
 * \param qnode Storage for an event queue node (used for queuing bind request)
 * \param iref IREF to which to bind
 * \param buflen_words Size of incoming buffer, in number of words
 */
errval_t lmp_chan_bind(struct lmp_chan *lc, struct lmp_bind_continuation cont,
                       struct event_queue_node *qnode, iref_t iref,
                       size_t buflen_words)
{
    errval_t err;

    lmp_chan_init(lc);

    /* store bind arguments */
    lc->iref = iref;
    lc->buflen_words = buflen_words;
    lc->bind_continuation = cont;

    /* allocate a cap slot for the new endpoint cap */
    err = slot_alloc(&lc->local_cap);
    if (err_is_fail(err)) {
        waitset_chanstate_destroy(&lc->send_waitset);
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    /* allocate a local endpoint */
    err = lmp_endpoint_create_in_slot(buflen_words, lc->local_cap,
                                      &lc->endpoint);
    if (err_is_fail(err)) {
        slot_free(lc->local_cap);
        waitset_chanstate_destroy(&lc->send_waitset);
        return err_push(err, LIB_ERR_ENDPOINT_CREATE);
    }

    // wait for the ability to use the monitor binding
    lc->connstate = LMP_BIND_WAIT;
    struct monitor_binding *mb = lc->monitor_binding = get_monitor_binding();
    event_mutex_enqueue_lock(&mb->mutex, qnode,
                             MKCLOSURE(send_bind_cont, lc));

    return SYS_ERR_OK;
}
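
A hedged client-side sketch of driving this bind (names illustrative; the continuation fields follow struct lmp_bind_continuation as used above, and the iref would normally come from the name service):

// Sketch: initiate a bind and resume from the continuation.
static void bind_done(void *st, errval_t err, struct lmp_chan *lc)
{
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "bind failed");
    }
    // channel is connected: register receive handlers on lc here
}

static errval_t start_bind(iref_t iref)
{
    static struct lmp_chan lc;
    static struct event_queue_node qnode;
    struct lmp_bind_continuation cont = { .handler = bind_done, .st = NULL };
    return lmp_chan_bind(&lc, cont, &qnode, iref, DEFAULT_LMP_BUF_WORDS);
}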
Example #17
static errval_t init_allocators(void)
{
    errval_t err, msgerr;

    struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client();
    assert(cl != NULL);

    // Get the bootinfo and map it in.
    struct capref bootinfo_frame;
    size_t bootinfo_size;
    struct bootinfo *bootinfo;

    msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size);
    if (err_is_fail(msgerr) || err_is_fail(err)) {
        USER_PANIC_ERR(err_is_fail(msgerr) ? msgerr : err, "failed in get_bootinfo");
    }

    err = vspace_map_one_frame((void**)&bootinfo, bootinfo_size, bootinfo_frame,
                               NULL, NULL);
    assert(err_is_ok(err));

    /* Initialize the memory allocator to handle PhysAddr caps */
    static struct range_slot_allocator devframes_allocator;
    err = range_slot_alloc_init(&devframes_allocator, PCI_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC_INIT);
    }

    err = mm_init(&pci_mm_physaddr, ObjType_DevFrame, 0, 48,
                  /* This next parameter is important. It specifies the maximum
                   * amount that a cap may be "chunked" (i.e. broken up) at each
                   * level in the allocator. Setting it higher than 1 reduces the
                   * memory overhead of keeping all the intermediate caps around,
                   * but leads to problems if you chunk up a cap too small to be
                   * able to allocate a large subregion. This caused problems
                   * for me with a large framebuffer... -AB 20110810 */
                  1, /*was DEFAULT_CNODE_BITS,*/
                  slab_default_refill, slot_alloc_dynamic, &devframes_allocator, false);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_INIT);
    }

    // Request I/O Cap
    struct capref requested_caps;
    errval_t error_code;
    err = cl->vtbl.get_io_cap(cl, &requested_caps, &error_code);
    assert(err_is_ok(err) && err_is_ok(error_code));
    // Copy into correct slot
    struct capref caps_io = {
        .cnode = cnode_task,
        .slot  = TASKCN_SLOT_IO
    };
    err = cap_copy(caps_io, requested_caps);

    // XXX: The code below is confused about gen/l/paddrs.
    // Caps should be managed in genpaddr, while the bus mgmt must be in lpaddr.
    err = cl->vtbl.get_phyaddr_cap(cl, &requested_caps, &error_code);
    assert(err_is_ok(err) && err_is_ok(error_code));
    physical_caps = requested_caps;

    // Build the capref for the first physical address capability
    struct capref phys_cap;
    phys_cap.cnode = build_cnoderef(requested_caps, PHYSADDRCN_BITS);
    phys_cap.slot = 0;

    struct cnoderef devcnode;
    err = slot_alloc(&my_devframes_cnode);
    assert(err_is_ok(err));
    cslot_t slots;
    err = cnode_create(&my_devframes_cnode, &devcnode, 255, &slots);
    if (err_is_fail(err)) { USER_PANIC_ERR(err, "cnode create"); }
    struct capref devframe;
    devframe.cnode = devcnode;
    devframe.slot = 0;

    for (int i = 0; i < bootinfo->regions_length; i++) {
        struct mem_region *mrp = &bootinfo->regions[i];
        if (mrp->mr_type == RegionType_Module) {
            skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
                         mrp->mr_base,
                         0,
                         mrp->mrmod_size,
                         mrp->mr_type,
                         mrp->mrmod_data);
        } else {
            skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
                         mrp->mr_base,
                         mrp->mr_bits,
                         ((size_t)1) << mrp->mr_bits,
                         mrp->mr_type,
                         mrp->mrmod_data);
        }

        if (mrp->mr_type == RegionType_PhyAddr ||
            mrp->mr_type == RegionType_PlatformData) {
            ACPI_DEBUG("Region %d: %"PRIxGENPADDR" - %"PRIxGENPADDR" %s\n",
                       i, mrp->mr_base,
                       mrp->mr_base + (((size_t)1)<<mrp->mr_bits),
                       mrp->mr_type == RegionType_PhyAddr ?
                       "physical address" : "platform data");

            err = cap_retype(devframe, phys_cap, ObjType_DevFrame, mrp->mr_bits);
            if (err_no(err) == SYS_ERR_REVOKE_FIRST) {
                printf("cannot retype region %d: need to revoke first; ignoring it\n", i);
            } else {
                assert(err_is_ok(err));

                err = mm_add(&pci_mm_physaddr, devframe,
                             mrp->mr_bits, mrp->mr_base);
                if (err_is_fail(err)) {
                    USER_PANIC_ERR(err, "adding region %d FAILED\n", i);
                }
            }

            phys_cap.slot++;
            devframe.slot++;
        }
    }

    return SYS_ERR_OK;
}
Example #18
int map_unmap(void)
{
    errval_t err;
    struct capref mem;

    DEBUG_MAP_UNMAP("ram_alloc\n");
    err = ram_alloc(&mem, BASE_PAGE_BITS);
    if (err_is_fail(err)) {
        printf("ram_alloc: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

    struct capref frame;
    DEBUG_MAP_UNMAP("retype\n");
    err = slot_alloc(&frame);
    if (err_is_fail(err)) {
        printf("slot_alloc: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
    err = cap_retype(frame, mem, ObjType_Frame, BASE_PAGE_BITS);
    if (err_is_fail(err)) {
        printf("cap_retype: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

    DEBUG_MAP_UNMAP("delete ram cap\n");
    err = cap_destroy(mem);
    if (err_is_fail(err)) {
        printf("cap_delete(mem): %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

    struct frame_identity fi;
    err = invoke_frame_identify(frame, &fi);
    if (err_is_fail(err)) {
        printf("frame_identify: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
    DEBUG_MAP_UNMAP("frame: base = 0x%"PRIxGENPADDR", bits = %d\n", fi.base, fi.bits);

#ifdef NKMTEST_DEBUG_MAP_UNMAP
    dump_pmap(get_current_pmap());
#endif

    struct vregion *vr;
    struct memobj *memobj;
    void *vaddr;
    DEBUG_MAP_UNMAP("map\n");
    err = vspace_map_one_frame(&vaddr, BASE_PAGE_SIZE, frame, &memobj, &vr);
    if (err_is_fail(err)) {
        printf("vspace_map_one_frame: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
    char *memory = vaddr;
    DEBUG_MAP_UNMAP("vaddr = %p\n", vaddr);

#ifdef NKMTEST_DEBUG_MAP_UNMAP
    dump_pmap(get_current_pmap());
#endif

    DEBUG_MAP_UNMAP("write 1\n");
    int i;
    for (i = 0; i < BASE_PAGE_SIZE; i++) {
        memory[i] = i % INT8_MAX;
    }
    DEBUG_MAP_UNMAP("verify 1\n");
    for (i = 0; i < BASE_PAGE_SIZE; i++) {
        assert(memory[i] == i % INT8_MAX);
    }

    DEBUG_MAP_UNMAP("delete frame cap\n");
    err = cap_destroy(frame);
    if (err_is_fail(err)) {
        printf("cap_delete(frame): %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }

#ifdef NKMTEST_DEBUG_MAP_UNMAP
    // no mapping should remain here
    dump_pmap(get_current_pmap());
    err = debug_dump_hw_ptables();
    if (err_is_fail(err)) {
        printf("kernel dump ptables: %s (%"PRIuERRV")\n", err_getstring(err), err);
        return 1;
    }
#endif

    printf("%s: done\n", __FUNCTION__);
    return 0;
}
Example #19
/**
 * \brief A monitor receives request to setup a connection
 * with another newly booted monitor from a third monitor
 */
static void bind_monitor_request_scc(struct intermon_binding *b,
                                     coreid_t core_id,
                                     intermon_caprep_t caprep,
                                     chanid_t chan_id,
                                     coreid_t from_core_id)
{
    struct intermon_ump_ipi_binding *umpb = NULL;
    errval_t err;

    /* Create the cap */
    struct capability cap_raw;
    caprep_to_capability(&caprep, &cap_raw);
    if (cap_raw.type != ObjType_Frame) {
        err = MON_ERR_WRONG_CAP_TYPE;
        goto error;
    }

    struct capref frame;
    err = slot_alloc(&frame);
    if (err_is_fail(err)) {
        goto error;
    }

    ram_set_affinity(cap_raw.u.frame.base, cap_raw.u.frame.base + ((genpaddr_t)1 << cap_raw.u.frame.bits));
    err = frame_alloc(&frame, ((genpaddr_t)1 << cap_raw.u.frame.bits), NULL);
    ram_set_affinity(0,0);

    /* err = monitor_cap_create(frame, &cap_raw, core_id); */
    if (err_is_fail(err)) {
        goto error;
    }

    struct frame_identity frameid = { .base = 0, .bits = 0 };
    err = invoke_frame_identify(frame, &frameid);
    assert(err == SYS_ERR_OK);

    printf("bind_monitor_request: URPC physical frame at 0x%llx\n", frameid.base);

    /* Setup the connection */
    void *buf;
    err = vspace_map_one_frame_attr(&buf, MON_URPC_SIZE, frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE, NULL,
                                    NULL);
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_VSPACE_MAP);
        goto error;
    }

    // Create remote notify cap
    struct capref notify_cap;
    err = notification_create_cap(chan_id, core_id, &notify_cap);
    assert(err == SYS_ERR_OK);

    // Allocate my own notification caps
    struct capref ep, my_notify_cap;
    struct lmp_endpoint *iep;
    int chanid;
    err = endpoint_create(LMP_RECV_LENGTH, &ep, &iep);
    assert(err_is_ok(err));
    err = notification_allocate(ep, &chanid);
    assert(err == SYS_ERR_OK);
    err = notification_create_cap(chanid, my_core_id, &my_notify_cap);
    assert(err == SYS_ERR_OK);

    // setup our side of the binding
    umpb = malloc(sizeof(struct intermon_ump_ipi_binding));
    assert(umpb != NULL);

    err = intermon_ump_ipi_init(umpb, get_default_waitset(),
                                buf + MON_URPC_CHANNEL_LEN,
                                MON_URPC_CHANNEL_LEN,
                                buf, MON_URPC_CHANNEL_LEN, notify_cap,
                                my_notify_cap, ep, iep);
    assert(err_is_ok(err));

    // Identify UMP frame for tracing
    struct frame_identity umpid;
    err = invoke_frame_identify(frame, &umpid);
    assert(err_is_ok(err));
    umpb->ump_state.chan.recvid = (uintptr_t)umpid.base;
    umpb->ump_state.chan.sendid =
        (uintptr_t)(umpid.base + MON_URPC_CHANNEL_LEN);

    // connect it to our request handlers
    err = intermon_init(&umpb->b, core_id);
    assert(err_is_ok(err));

    /* Send reply */
reply:
    assert(umpb != NULL);
    bind_monitor_reply_scc_cont(&umpb->b, err, chanid);
    return;

error:
    assert(!"Argh");
    // FIXME: cleanup!
    goto reply;
}

/**
 * \brief The monitor that proxied the request for one monitor to
 * setup a connection with another monitor gets the reply
 */
static void bind_monitor_reply_scc(struct intermon_binding *binding,
                                   errval_t err, chanid_t chan_id,
                                   coreid_t core_id)
{
    struct intermon_ump_ipi_binding *b = (struct intermon_ump_ipi_binding *)binding;

    // Create notify cap to that core
    struct capref notify_cap;
    err = notification_create_cap(chan_id, core_id, &notify_cap);
    assert(err == SYS_ERR_OK);

    // And assign it to the binding
    err = ipi_notify_set(&b->ipi_notify, notify_cap);
    assert(err_is_ok(err));

    if (err_is_fail(err)) { // XXX
        DEBUG_ERR(err, "Got error in bind monitor reply");
    }
}

/******* stack-ripped bind_monitor_proxy_scc *******/

static void bind_monitor_request_scc_handler(struct intermon_binding *b,
        struct intermon_msg_queue_elem *e);

struct bind_monitor_request_scc_state {
    struct intermon_msg_queue_elem elem;
    struct intermon_bind_monitor_request_scc__args args;
};

static void bind_monitor_request_scc_cont(struct intermon_binding *dst_binding,
        coreid_t src_core_id,
        intermon_caprep_t caprep,
        chanid_t chan_id,
        coreid_t core_id)
{
    errval_t err;

    err = dst_binding->tx_vtbl.
          bind_monitor_request_scc(dst_binding, NOP_CONT, src_core_id,
                                   caprep, chan_id, core_id);
    if (err_is_fail(err)) {
        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct bind_monitor_request_scc_state *me =
                malloc(sizeof(struct bind_monitor_request_scc_state));
            assert(me != NULL);
            struct intermon_state *ist = dst_binding->st;
            assert(ist != NULL);
            me->args.core_id = src_core_id;
            me->args.cap = caprep;
            me->args.chan_id = chan_id;
            me->args.from_core_id = core_id;
            me->elem.cont = bind_monitor_request_scc_handler;

            err = intermon_enqueue_send(dst_binding, &ist->queue,
                                        get_default_waitset(), &me->elem.queue);
            assert(err_is_ok(err));
            return;
        }

        DEBUG_ERR(err, "forwarding bind request failed");
    }
}
Example #20
static errval_t do_map(struct pmap_arm *pmap, genvaddr_t vaddr,
                       struct capref frame, size_t offset, size_t size,
                       vregion_flags_t flags, size_t *retoff, size_t *retsize)
{
    errval_t err;

    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = DIVIDE_ROUND_UP(size, BASE_PAGE_SIZE);
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        // fast path
        err = do_single_map(pmap, vaddr, vend, frame, offset, pte_count, flags);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "[do_map] in fast path");
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }
    } else { // multiple leaf page tables
        // first leaf
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        genvaddr_t temp_end = vaddr + c * BASE_PAGE_SIZE;
        err = do_single_map(pmap, vaddr, temp_end, frame, offset, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }

        // map full leaves
        while (ARM_L1_OFFSET(temp_end) < ARM_L1_OFFSET(vend)) { // update vars
            vaddr = temp_end;
            temp_end = vaddr + ARM_L2_MAX_ENTRIES * BASE_PAGE_SIZE;
            offset += c * BASE_PAGE_SIZE;
            c = ARM_L2_MAX_ENTRIES;
            // copy cap
            struct capref next;
            err = slot_alloc(&next);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
            err = cap_copy(next, frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
            frame = next;

            // do mapping
            err = do_single_map(pmap, vaddr, temp_end, frame, offset, ARM_L2_MAX_ENTRIES, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
        }

        // map remaining part
        offset += c * BASE_PAGE_SIZE;
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(temp_end);
        if (c) {
            // copy cap
            struct capref next;
            err = slot_alloc(&next);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
            err = cap_copy(next, frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }

            // do mapping
            err = do_single_map(pmap, temp_end, vend, next, offset, c, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
        }
    }
    if (retoff) {
        *retoff = offset;
    }
    if (retsize) {
        *retsize = size;
    }
    //has_vnode_debug = false;
    return SYS_ERR_OK;
#if 0
    errval_t err;
    uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);

    for (size_t i = offset; i < offset + size; i += BASE_PAGE_SIZE) {

        vaddr += BASE_PAGE_SIZE;
    }

    if (retoff) {
        *retoff = offset;
    }
    if (retsize) {
        *retsize = size;
    }
    return SYS_ERR_OK;
#endif
}
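
The three-way split mirrors the two-level ARM page-table layout: one L1 entry covers ARM_L2_MAX_ENTRIES base pages, so a range whose start is not L1-aligned decomposes into a leading partial leaf, some whole leaves, and a trailing partial leaf. A freestanding sketch of just that arithmetic (assumes 4 KB pages and 256-entry L2 tables, i.e. 1 MB per L1 entry):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096u
#define L2_ENTRIES 256u                      /* entries per L2 leaf table */

int main(void)
{
    uint32_t vaddr = 0x000ff000;             /* 4 KB below an L1 boundary */
    uint32_t pages = 768;                    /* a 3 MB mapping */

    uint32_t lead = L2_ENTRIES - (vaddr / PAGE_SIZE) % L2_ENTRIES;
    uint32_t full = (pages - lead) / L2_ENTRIES;   /* whole leaf tables */
    uint32_t tail = (pages - lead) % L2_ENTRIES;

    /* prints: lead=1 pages, full leaves=2, tail=255 pages */
    printf("lead=%u pages, full leaves=%u, tail=%u pages\n", lead, full, tail);
    return 0;
}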
Example #21
static errval_t cow_init(size_t bufsize, size_t granularity,
        struct cnoderef *cow_cn, size_t *frame_count)
{
    assert(cow_cn);
    assert(frame_count);

    errval_t err;
    struct capref frame, cncap;
    struct cnoderef cnode;

    // round the buffer up to a whole number of granules, then get a RAM cap
    bufsize = (bufsize / granularity + 1) * granularity;
    err = slot_alloc(&frame);
    assert(err_is_ok(err));
    size_t rambits = log2floor(bufsize);
    debug_printf("bits = %zu\n", rambits);
    err = ram_alloc(&frame, rambits);
    assert(err_is_ok(err));
    // calculate #slots
    cslot_t cap_count = bufsize / granularity;
    cslot_t slots;
    // get CNode
    err = cnode_create(&cncap, &cnode, cap_count, &slots);
    assert(err_is_ok(err));
    assert(slots >= cap_count);
    // retype RAM into Frames
    struct capref first_frame = (struct capref) { .cnode = cnode, .slot = 0 };
    err = cap_retype(first_frame, frame, ObjType_Frame, log2floor(granularity));
    assert(err_is_ok(err));
    err = cap_destroy(frame);
    assert(err_is_ok(err));
    *frame_count = slots;
    *cow_cn = cnode;
    return SYS_ERR_OK;
}

// create cow-enabled vregion & backing
// Can copy-on-write in granularity-sized chunks
static errval_t vspace_map_one_frame_cow(void **buf, size_t size,
        struct capref frame, vregion_flags_t flags,
        struct memobj **memobj, struct vregion **vregion,
        size_t granularity)
{
    errval_t err;
    if (!memobj) {
        memobj = malloc(sizeof(*memobj));
    }
    assert(memobj);
    if (!vregion) {
        vregion = malloc(sizeof(*vregion));
    }
    assert(vregion);
    err = vspace_map_anon_attr(buf, memobj, vregion, size, &size, flags);
    assert(err_is_ok(err));
    size_t chunks = size / granularity;
    cslot_t slots;
    struct capref cncap;
    struct cnoderef cnode;
    err = cnode_create(&cncap, &cnode, chunks, &slots);
    assert(err_is_ok(err));
    assert(slots >= chunks);
    struct capref fc = (struct capref) { .cnode = cnode, .slot = 0 };
    for (int i = 0; i < chunks; i++) {
        err = cap_copy(fc, frame);
        assert(err_is_ok(err));
        err = (*memobj)->f.fill_foff(*memobj, i * granularity, fc, granularity, i*granularity);
        assert(err_is_ok(err));
        err = (*memobj)->f.pagefault(*memobj, *vregion, i * granularity, 0);
        assert(err_is_ok(err));
        fc.slot++;
    }
    return SYS_ERR_OK;
}

int main(int argc, char *argv[])
{
    errval_t err;
    struct capref frame;
    size_t retsize;
    void *vbuf;
    struct vregion *vregion;
    uint8_t *buf;

    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    err = frame_alloc(&frame, BUFSIZE, &retsize);
    assert(retsize >= BUFSIZE);
    if (err_is_fail(err)) {
        debug_printf("frame_alloc: %s\n", err_getstring(err));
        return 1;
    }
    debug_printf("%s:%d: %zu\n", __FUNCTION__, __LINE__, retsize);
    // setup region
    err = vspace_map_one_frame_attr(&vbuf, retsize, frame,
            VREGION_FLAGS_READ_WRITE, NULL, &vregion);
    if (err_is_fail(err)) {
        debug_printf("vspace_map: %s\n", err_getstring(err));
        return 1;
    }
    debug_printf("vaddr: %p\n", vbuf);

    // write stuff to region
    buf = vbuf;
    debug_printf("%s:%d: %p, %lu pages\n", __FUNCTION__, __LINE__, buf, BUFSIZE / BASE_PAGE_SIZE);
    memset(buf, 0xAA, BUFSIZE);

    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    // create cow copy
    //  setup exception handler
    err = thread_set_exception_handler(handler, NULL, ex_stack,
            ex_stack+EX_STACK_SIZE, NULL, NULL);
    assert(err_is_ok(err));
    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    err = cow_init(BUFSIZE, BASE_PAGE_SIZE, &cow_frames, &cow_frame_count);
    assert(err_is_ok(err));
    //  create r/o copy of region and tell exception handler bounds
    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    err = vspace_map_one_frame_cow(&cow_vbuf, retsize, frame,
            VREGION_FLAGS_READ, NULL, &cow_vregion, BASE_PAGE_SIZE);
    if (err_is_fail(err)) {
        debug_printf("vspace_map: %s\n", err_getstring(err));
        return 1;
    }
    debug_printf("cow_vaddr: %p\n", cow_vbuf);

    // do stuff cow copy
    uint8_t *cbuf = cow_vbuf;
    for (int i = 0; i < BUFSIZE / BASE_PAGE_SIZE; i+=2) {
        cbuf[i * BASE_PAGE_SIZE + 1] = 0x55;
    }
    // verify results
    for (int i = 0; i < BUFSIZE / BASE_PAGE_SIZE; i++) {
        printf("page %d\n", i);
        printf("buf[0] = %d; cbuf[0] = %d\n", buf[i*BASE_PAGE_SIZE],
                cbuf[i*BASE_PAGE_SIZE]);
        printf("buf[1] = %d; cbuf[1] = %d\n", buf[i*BASE_PAGE_SIZE+1],
                cbuf[i*BASE_PAGE_SIZE+1]);
    }
    debug_dump_hw_ptables();
    return EXIT_SUCCESS;
}
Example #22
int invalid_mappings(void)
{
    // outline:
    // get pml4, pdpt, pdir, ptable, and frame
    // check all combinations to make sure that restrictions are implemented
    // correctly in kernel space
    // VALID:
    // map pdpt in pml4
    // map pdir in pdpt
    // map pt   in pdir
    // map frame in {pt, pdir, pdpt}
    // INVALID:
    // all other combinations

    errval_t err;
    struct capref caps[7];
    struct capref mapping;

    // allocate slot for mapping cap: can reuse
    err = slot_alloc(&mapping);
    if (err_is_fail(err)) {
        debug_printf("slot_alloc: %s (%ld)\n", err_getstring(err), err);
        return 1;
    }

    // allocate caps
    for (int i = 0; i < 5; i++) {
        // get 4k block
        struct capref mem;
        err = ram_alloc(&mem, BASE_PAGE_BITS);
        if (err_is_fail(err)) {
            debug_printf("ram_alloc: %s (%ld)\n", err_getstring(err), err);
            return 1;
        }

        // get slot for retype dest
        err = slot_alloc(&caps[i]);
        if (err_is_fail(err)) {
            debug_printf("slot_alloc: %s (%ld)\n", err_getstring(err), err);
            return 1;
        }
        // retype to selected type
        err = cap_retype(caps[i], mem, types[i], BASE_PAGE_BITS);
        if (err_is_fail(err)) {
            debug_printf("cap_retype: %s (%ld)\n", err_getstring(err), err);
            return 1;
        }

        // cleanup source cap
        DEBUG_INVALID_MAPPINGS("delete ram cap\n");
        err = cap_destroy(mem);
        if (err_is_fail(err)) {
            debug_printf("cap_delete(mem): %s (%ld)\n", err_getstring(err), err);
            return 1;
        }
    }
    // cap 6: 2M frame
    size_t rb = 0;
    err = frame_alloc(&caps[5], X86_64_LARGE_PAGE_SIZE, &rb);
    if (err_is_fail(err) || rb != X86_64_LARGE_PAGE_SIZE) {
        debug_printf("frame_alloc: %s (%ld)\n", err_getstring(err), err);
        return 1;
    }
    // cap 7: 1G frame
    err = frame_alloc(&caps[6], X86_64_HUGE_PAGE_SIZE, &rb);
    if (err_is_fail(err) || rb != X86_64_HUGE_PAGE_SIZE) {
        debug_printf("frame_alloc: %s (%ld)\n", err_getstring(err), err);
        return 1;
    }

    paging_x86_64_flags_t attr = 0;
    // select dest (ignore frame, asserts)
    for (int i = 0; i < 4; i++) {
        // select source
        for (int j = 0; j < 7; j++) {
            if (j >= 4) {
                // frame
                attr = FRAME_ACCESS_DEFAULT;
            } else {
                // ptable
                attr = PTABLE_ACCESS_DEFAULT;
            }
            // try mapping
            err = vnode_map(caps[i], caps[j], /*slot*/0, attr, /*off*/0,
                            /*count*/1, mapping);
            check_result(err, i, j);
            // unmap if mapping succeeded
            if (err_is_ok(err)) {
                err = vnode_unmap(caps[i], mapping);
                if (err_is_fail(err)) {
                    DEBUG_ERR(err, "vnode_unmap");
                }
                assert(err_is_ok(err));
                // XXX: better API?
                err = cap_delete(mapping);
                assert(err_is_ok(err));
            }
        }
    }

    printf("All tests executed: %d PASSED, %d FAILED\n", pass, fail);

    return 0;
}
Example #23
void
retrieve_request__rx(struct intermon_binding *b,
                     intermon_caprep_t caprep,
                     genvaddr_t st)
{
    errval_t err, err2;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;

    struct retrieve_response_st *rst;
    err = calloce(1, sizeof(*rst), &rst);
    PANIC_IF_ERR(err, "allocating retrieve response state");
    rst->st = st;
    rst->from = inter_st->core_id;

    struct capability rawcap;
    caprep_to_capability(&caprep, &rawcap);

    struct capref cap;
    err = slot_alloc(&cap);
    GOTO_IF_ERR(err, respond_err);

    err = monitor_copy_if_exists(&rawcap, cap);
    GOTO_IF_ERR(err, free_slot);

    distcap_state_t state;
    err = dom_cnode_get_state(get_cap_domref(cap), &state);
    GOTO_IF_ERR(err, delete_cap);

    if (distcap_state_is_busy(state)) {
        err = MON_ERR_REMOTE_CAP_RETRY;
        goto delete_cap;
    }
    if (distcap_state_is_foreign(state)) {
        err = MON_ERR_CAP_FOREIGN;
        goto delete_cap;
    }

    uint8_t relations, remote_relations;
    err = monitor_cap_has_relations(cap, 0xFF, &relations);
    GOTO_IF_ERR(err, delete_cap);

    err = monitor_remote_relations(cap, 0, 0, &remote_relations);
    GOTO_IF_ERR(err, delete_cap);

    rst->relations = relations | remote_relations | RRELS_COPY_BIT;

    err = monitor_set_cap_owner(cap_root, get_cap_addr(cap),
                                get_cap_valid_bits(cap),
                                rst->from);

delete_cap:
    err2 = cap_delete(cap);
    DEBUG_IF_ERR(err2, "while deleting temp cap for retrieve");

free_slot:
    err2 = slot_free(cap);
    DEBUG_IF_ERR(err2, "freeing temp cap slot for retrieve");

respond_err:
    retrieve_result__enq(err, rst);
}
Example #24
static void intermon_bind_ump_request(struct intermon_binding *ib,
                                      iref_t iref, con_id_t your_mon_id,
                                      uint32_t channel_length_in,
                                      uint32_t channel_length_out,
                                      genpaddr_t framebase, uint8_t framebits,
                                      intermon_caprep_t caprep)
{
    errval_t err;

    /* Get client's core_id */
    struct intermon_state *ist = ib->st;
    assert(ist != NULL);
    coreid_t core_id = ist->core_id;

    /* Construct the frame capability */
    struct capability frame_cap = {
        .type = ObjType_Frame,
        .rights = CAPRIGHTS_READ_WRITE, // XXX
        .u.frame = {
            .base = framebase,
            .bits = framebits
        }
    };

    // Construct the notify cap
    struct capref notify_cap = NULL_CAP;
    struct capability capability;
    caprep_to_capability(&caprep, &capability);
    if(capability.type != ObjType_Null) {
        err = slot_alloc(&notify_cap);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Failed to allocate slot from channel_alloc");
        }
        err = monitor_cap_create(notify_cap, &capability, core_id);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "monitor_cap_create failed");
        }
    }

    // XXX: Put frame cap on a separate allocator as it is not deleted anymore
    struct capref frame;
    err = slot_alloc(&frame);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Failed to allocate slot from channel_alloc");
    }
    err = monitor_cap_create(frame, &frame_cap, core_id);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_cap_create failed");
    }

    /* Get the server's connection */
    struct monitor_binding *domain_binding = NULL;
    err = iref_get_binding(iref, &domain_binding);
    assert(err_is_ok(err));

    /* Get the service id */
    uintptr_t service_id = 0;
    err = iref_get_service_id(iref, &service_id);
    assert(err_is_ok(err));

    /* Create a new connection state */
    uintptr_t my_mon_id;
    struct remote_conn_state *con;
    err = remote_conn_alloc(&con, &my_mon_id, REMOTE_CONN_UMP);
    assert(err_is_ok(err));

    // Set the monitor portion of it
    con->mon_id = your_mon_id;
    con->mon_binding = ib;
    con->x.ump.frame = frame;
    con->core_id = core_id;

    bind_ump_service_request_cont(domain_binding, service_id, my_mon_id,
                                  frame, channel_length_in, channel_length_out,
                                  notify_cap, ib, your_mon_id);
}
Example #25
errval_t initialize_mem_serv(void)
{
    errval_t err;

    /* Step 1: Initialize slot allocator by passing a cnode cap for it to start with */
    struct capref cnode_cap;
    err = slot_alloc(&cnode_cap);
    assert(err_is_ok(err));
    struct capref cnode_start_cap = { .slot  = 0 };

    struct capref ram;
    err = ram_alloc_fixed(&ram, BASE_PAGE_BITS, 0, 0);
    assert(err_is_ok(err));
    err = cnode_create_from_mem(cnode_cap, ram, &cnode_start_cap.cnode,
                              DEFAULT_CNODE_BITS);
    assert(err_is_ok(err));

    /* location where slot allocator will place its top-level cnode */
    struct capref top_slot_cap = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_SLOT_ALLOCR,
    };

    /* clear mm_ram struct */
    memset(&mm_ram, 0, sizeof(mm_ram));

    /* init slot allocator */
    err = slot_prealloc_init(&ram_slot_alloc, top_slot_cap, MAXCHILDBITS,
                           CNODE_BITS, cnode_start_cap,
                           1UL << DEFAULT_CNODE_BITS, &mm_ram);
    assert(err_is_ok(err));

    // FIXME: remove magic constant for lowest valid RAM address
    err = mm_init(&mm_ram, ObjType_RAM, 0x80000000,
                MAXSIZEBITS, MAXCHILDBITS, NULL,
                slot_alloc_prealloc, &ram_slot_alloc, true);
    assert(err_is_ok(err));

    /* Step 2: give MM allocator static storage to get it started */
    static char nodebuf[SLAB_STATIC_SIZE(MINSPARENODES, MM_NODE_SIZE(MAXCHILDBITS))];
    slab_grow(&mm_ram.slabs, nodebuf, sizeof(nodebuf));

    /* Step 3: walk bootinfo and add all unused RAM caps to allocator */
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot = 0,
    };

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type == RegionType_Empty) {
            //dump_ram_region(i, bi->regions + i);

            mem_total += ((size_t)1) << bi->regions[i].mr_bits;

            if (bi->regions[i].mr_consumed) {
                // region consumed by init, skipped
                mem_cap.slot++;
                continue;
            }

            err = mm_add(&mm_ram, mem_cap, bi->regions[i].mr_bits,
                         bi->regions[i].mr_base);
            if (err_is_ok(err)) {
                mem_avail += ((size_t)1) << bi->regions[i].mr_bits;
            } else {
                DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%d) FAILED",
                          i, bi->regions[i].mr_base, bi->regions[i].mr_bits);
            }

            /* try to refill slot allocator (may fail if the mem allocator is empty) */
            err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
            if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
                DEBUG_ERR(err, "in slot_prealloc_refill() while initialising"
                               " memory allocator");
                abort();
            }

            /* refill slab allocator if needed and possible */
            if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
                && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
                                + 10 * BASE_PAGE_SIZE) {
                slab_default_refill(&mm_ram.slabs); // may fail
            }
            mem_cap.slot++;
        }
    }

    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    if (err_is_fail(err)) {
        debug_printf("Fatal internal error in RAM allocator: failed to initialise "
               "slot allocator\n");
        DEBUG_ERR(err, "failed to init slot allocator");
        abort();
    }

    debug_printf("RAM allocator initialised, %zd MB (of %zd MB) available\n",
           mem_avail / 1024 / 1024, mem_total / 1024 / 1024);


    // setup proper multi slot alloc
    err = multi_slot_alloc_init(&msa, DEFAULT_CNODE_SLOTS, NULL);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "multi_slot_alloc_init");
    }
    debug_printf("MSA initialised\n");

    // switch over ram alloc to proper ram allocator
    ram_alloc_set(memserv_alloc);

    return SYS_ERR_OK;
}
Example #26
/**
 * \brief Boot an app core of x86_64 type
 *
 * The processors are started by a sequence of INIT and STARTUP IPIs
 * which are sent by this function.
 * CMOS writes to the shutdown status byte are used to execute
 * different memory locations.
 *
 * \param core_id   APIC ID of the core to try booting
 * \param entry     Entry address for new kernel in the destination
 *                  architecture's lvaddr_t given in genvaddr_t
 *
 * \returns Zero on successful boot, non-zero (error code) on failure
 */
int start_aps_x86_64_start(uint8_t core_id, genvaddr_t entry)
{
    DEBUG("%s:%d: start_aps_x86_64_start\n", __FILE__, __LINE__);

    errval_t err;

    // Copy the startup code to the real-mode address
    uint8_t *real_src = (uint8_t *) &x86_64_start_ap;
    uint8_t *real_end = (uint8_t *) &x86_64_start_ap_end;

    struct capref bootcap;

#ifdef __k1om__
    struct capref realmodecap;

    realmodecap.cnode = cnode_task;
    realmodecap.slot  = TASKCN_SLOT_COREBOOT;
    err = slot_alloc(&bootcap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Allocating a new slot");
    }

    err = cap_copy(bootcap, realmodecap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Copying capability");
    }


#else
    struct acpi_rpc_client* acl = get_acpi_rpc_client();
    errval_t error_code;
    err = acl->vtbl.mm_realloc_range_proxy(acl, 16, 0x0,
                                                    &bootcap, &error_code);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mm_alloc_range_proxy failed.");
    }
    if (err_is_fail(error_code)) {
        USER_PANIC_ERR(error_code, "mm_alloc_range_proxy return failed.");
    }
#endif

    void* real_base;
    err = vspace_map_one_frame(&real_base, 1<<16, bootcap, NULL, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mapping real-mode boot frame");
    }
    uint8_t* real_dest = (uint8_t*)real_base + X86_64_REAL_MODE_LINEAR_OFFSET;

    memcpy(real_dest, real_src, real_end - real_src);

    /* Pointer to the entry point called from init_ap.S */
    volatile uint64_t *absolute_entry_ptr = (volatile uint64_t *)
                                            ((
                                             (lpaddr_t) &x86_64_init_ap_absolute_entry -
                                             (lpaddr_t) &x86_64_start_ap
                                            )
                                            +
                                            real_dest);
    //copy the address of the function start (in boot.S) to the long-mode
    //assembler code to be able to perform an absolute jump
    *absolute_entry_ptr = entry;

    // pointer to the shared global variable amongst all kernels
    volatile uint64_t *ap_global = (volatile uint64_t *)
                                   ((
                                    (lpaddr_t) &x86_64_init_ap_global -
                                    (lpaddr_t) &x86_64_start_ap
                                   )
                                   + real_dest);


    genpaddr_t global;
    struct monitor_blocking_rpc_client *mc =
        get_monitor_blocking_rpc_client();
    err = mc->vtbl.get_global_paddr(mc, &global);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke spawn core");
        return err_push(err, MON_ERR_SPAWN_CORE);
    }
    *ap_global = (uint64_t)(genpaddr_t)global;

    // pointer to the pseudo-lock used to detect boot up of new core
    volatile uint32_t *ap_wait = (volatile uint32_t *)
                                         ((lpaddr_t) &x86_64_init_ap_wait -
                                         ((lpaddr_t) &x86_64_start_ap) +
                                         real_dest);

    // Pointer to the lock variable in the realmode code
    volatile uint8_t *ap_lock = (volatile uint8_t *)
                                        ((lpaddr_t) &x86_64_init_ap_lock -
                                        ((lpaddr_t) &x86_64_start_ap) +
                                        real_dest);

    *ap_wait = AP_STARTING_UP;

    end = bench_tsc();

#if  defined(__k1om__)
    delay_ms(10);
#endif
    err = invoke_send_init_ipi(ipi_cap, core_id);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke send init ipi");
        return err;
    }

#if  defined(__k1om__)
    delay_ms(200);
#endif

    // x86 protocol actually would like us to do this twice
    err = invoke_send_start_ipi(ipi_cap, core_id, entry);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke sipi");
        return err;
    }

    // Give the new core a bit time to start-up and set the lock
    for (uint64_t i = 0; i < STARTUP_TIMEOUT; i++) {
        if (*ap_lock != 0) {
            break;
        }
    }

    // If the lock is set, the core has been started, otherwise assume, that
    // a core with this APIC ID doesn't exist.
    if (*ap_lock != 0) {
        while (*ap_wait != AP_STARTED);
        trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_CORE_START_REQUEST_ACK, core_id);
        *ap_lock = 0;
        return 0;
    }

    assert(!"badness");
    return -1;
}
Example #27
static errval_t spawn(char *path, char *const argv[], char *argbuf,
                      size_t argbytes, char *const envp[],
                      struct capref inheritcn_cap, struct capref argcn_cap,
                      domainid_t *domainid)
{
    errval_t err, msgerr;

    /* read file into memory */
    vfs_handle_t fh;
    err = vfs_open(path, &fh);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_LOAD);
    }

    struct vfs_fileinfo info;
    err = vfs_stat(fh, &info);
    if (err_is_fail(err)) {
        vfs_close(fh);
        return err_push(err, SPAWN_ERR_LOAD);
    }

    assert(info.type == VFS_FILE);
    uint8_t *image = malloc(info.size);
    if (image == NULL) {
        vfs_close(fh);
        return LIB_ERR_MALLOC_FAIL;
    }

    size_t pos = 0, readlen;
    do {
        err = vfs_read(fh, &image[pos], info.size - pos, &readlen);
        if (err_is_fail(err)) {
            vfs_close(fh);
            free(image);
            return err_push(err, SPAWN_ERR_LOAD);
        } else if (readlen == 0) {
            vfs_close(fh);
            free(image);
            return SPAWN_ERR_LOAD; // XXX
        } else {
            pos += readlen;
        }
    } while (err_is_ok(err) && readlen > 0 && pos < info.size);

    err = vfs_close(fh);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to close file %s", path);
    }

    // find short name (last part of path)
    char *name = strrchr(path, VFS_PATH_SEP);
    if (name == NULL) {
        name = path;
    } else {
        name++;
    }

    /* spawn the image */
    struct spawninfo si;
    err = spawn_load_image(&si, (lvaddr_t)image, info.size, CURRENT_CPU_TYPE,
                           name, my_core_id, argv, envp, inheritcn_cap,
                           argcn_cap);
    if (err_is_fail(err)) {
        free(image);
        return err;
    }

    free(image);

    /* request connection from monitor */
    struct monitor_blocking_rpc_client *mrpc = get_monitor_blocking_rpc_client();
    struct capref monep;
    err = mrpc->vtbl.alloc_monitor_ep(mrpc, &msgerr, &monep);
    if (err_is_fail(err)) {
        spawn_free(&si);
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    } else if (err_is_fail(msgerr)) {
        spawn_free(&si);
        return msgerr;
    }

    /* copy connection into the new domain */
    struct capref destep = {
        .cnode = si.rootcn,
        .slot  = ROOTCN_SLOT_MONITOREP,
    };
    err = cap_copy(destep, monep);
    if (err_is_fail(err)) {
        spawn_free(&si);
        cap_destroy(monep);
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    }

    err = cap_destroy(monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    }

    debug_printf("spawning %s on core %u\n", path, my_core_id);

    /* give the perfmon capability */
    struct capref dest, src;
    dest.cnode = si.taskcn;
    dest.slot = TASKCN_SLOT_PERF_MON;
    src.cnode = cnode_task;
    src.slot = TASKCN_SLOT_PERF_MON;
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        spawn_free(&si);
        return err_push(err, INIT_ERR_COPY_PERF_MON);
    }

    /* run the domain */
    err = spawn_run(&si);
    if (err_is_fail(err)) {
        spawn_free(&si);
        return err_push(err, SPAWN_ERR_RUN);
    }

    // Allocate domain id
    struct ps_entry *pe = malloc(sizeof(struct ps_entry));
    assert(pe != NULL);
    memset(pe, 0, sizeof(struct ps_entry));
    memcpy(pe->argv, argv, MAX_CMDLINE_ARGS*sizeof(*argv));
    pe->argbuf = argbuf;
    pe->argbytes = argbytes;
    /*
     * NB: It's important to keep a copy of the DCB *and* the root
     * CNode around.  We need to revoke both (in the right order, see
     * kill_domain() below), so that we ensure no one else is
     * referring to the domain's CSpace anymore. Especially the loop
     * created by placing rootcn into its own address space becomes a
     * problem here.
     */
    err = slot_alloc(&pe->rootcn_cap);
    assert(err_is_ok(err));
    err = cap_copy(pe->rootcn_cap, si.rootcn_cap);
    assert(err_is_ok(err));
    pe->rootcn = si.rootcn;
    err = slot_alloc(&pe->dcb);
    assert(err_is_ok(err));
    err = cap_copy(pe->dcb, si.dcb);
    assert(err_is_ok(err));
    pe->status = PS_STATUS_RUNNING;
    err = ps_allocate(pe, domainid);
    if (err_is_fail(err)) {
        free(pe);
        spawn_free(&si);
        return err;
    }

    // Store in target dispatcher frame
    struct dispatcher_generic *dg = get_dispatcher_generic(si.handle);
    dg->domain_id = *domainid;

    /* cleanup */
    err = spawn_free(&si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_FREE);
    }

    return SYS_ERR_OK;
}
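
A hypothetical caller of spawn() might look as follows; the binary path, the
argument and environment vectors, and the use of NULL_CAP for the optional
inherit/argument CNodes are illustrative only:

// Illustrative caller of spawn() above (path and argv are made up).
static void spawn_hello(void)
{
    char *argv[] = { "hello", NULL };
    char *envp[] = { NULL };
    domainid_t domainid;

    errval_t err = spawn("/x86_64/sbin/hello", argv, NULL, 0, envp,
                         NULL_CAP, NULL_CAP, &domainid);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to spawn hello");
    } else {
        debug_printf("spawned hello as domain %u\n", domainid);
    }
}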

static void retry_use_local_memserv_response(void *a)
{
    errval_t err;

    struct spawn_binding *b = (struct spawn_binding*)a;

    err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT);

    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // try again
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_use_local_memserv_response, a));
    }
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending use_local_memserv reply\n");
    }
}
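
This retry shape is the standard Flounder idiom for transient transmit
failures: a send that returns FLOUNDER_ERR_TX_BUSY re-registers the same
closure, which the waitset runs again once the channel can accept another
message. The same skeleton works for any binding type; example_binding and
example_response below are placeholders, not real interfaces:

// Generic Flounder send-with-retry skeleton (binding and message names are
// placeholders for whatever interface is in use).
static void try_send_response(void *a)
{
    struct example_binding *b = (struct example_binding *)a;

    errval_t err = b->tx_vtbl.example_response(b, NOP_CONT);
    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // transmit queue full: re-run this closure when the channel drains
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(try_send_response, a));
    }
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending example response");
    }
}
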
Example #28
static void intermon_bind_ump_reply(struct intermon_binding *ib, 
                                    uint64_t my_mon_id, uint64_t your_mon_id,
                                    errval_t msgerr, 
                                    intermon_caprep_t caprep)
{
    errval_t err;
    struct remote_conn_state *con = remote_conn_lookup(my_mon_id);
    if (con == NULL) {
        USER_PANIC_ERR(0, "unknown mon_id in UMP bind reply");
        return;
    }

    uintptr_t domain_id = con->domain_id;
    struct monitor_binding *domain_binding = con->domain_binding;
    struct capref notify_cap = NULL_CAP;

    if (err_is_ok(msgerr)) { /* bind succeeded */
        con->mon_id = your_mon_id;
        con->mon_binding = ib;

#if 0
        /* map in UMP channel state */
        void *buf;
        err = vspace_map_one_frame_attr(&buf,
              2 * (UMP_CHANNEL_SIZE + con->localchan.size * sizeof(uintptr_t)),
                                        con->frame, VREGION_FLAGS_READ,
                                        NULL, NULL);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "vspace_map_one_frame failed");
            // XXX: should not be an assert, but we don't have any way to do
            // connection teardown here!
            assert(buf != NULL);
        }
        con->sharedchan = buf;
        con->localchan.buf = buf + 2 * UMP_CHANNEL_SIZE;

        // XXX: Put frame cap on a separate allocator as it is not deleted anymore
        struct capref frame_copy;
        err = slot_alloc(&frame_copy);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Failed to allocator slot from channel_alloc");
        }
        err = cap_copy(frame_copy, con->frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "Failed create copy of frame cap");
        }
        err = cap_destroy(con->frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy_default failed");
        }
        con->frame = frame_copy;
#endif

        struct capability capability;
        caprep_to_capability(&caprep, &capability);

        if (capability.type != ObjType_Null) {
            // Get core id of sender
            coreid_t core_id = ((struct intermon_state *)ib->st)->core_id;

            // Construct the notify cap
            err = slot_alloc(&notify_cap);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "Failed to allocate slot from channel_alloc");
            }

            err = monitor_cap_create(notify_cap, &capability, core_id);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "monitor_cap_create failed");
            }
        }
    } else { /* bind refused */
        err = cap_destroy(con->x.ump.frame);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "cap_destroy_default failed");
        }
        err = remote_conn_free(my_mon_id);
        assert(err_is_ok(err));
    }

    bind_ump_reply_client_cont(domain_binding, my_mon_id, domain_id, msgerr,
                               notify_cap);
}
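
For context, the notify-cap construction in the success path condenses to the
following receive-side helper; this is a sketch, the function name is made up,
and asserts stand in for real error handling:

// Sketch: rebuild a local notify cap from its wire representation, as done
// in the success path above.
static struct capref notify_cap_from_caprep(intermon_caprep_t *caprep,
                                            coreid_t sender_core)
{
    struct capability capability;
    caprep_to_capability(caprep, &capability);
    if (capability.type == ObjType_Null) {
        return NULL_CAP; // sender did not pass a notify capability
    }

    struct capref notify_cap;
    errval_t err = slot_alloc(&notify_cap);
    assert(err_is_ok(err));
    err = monitor_cap_create(notify_cap, &capability, sender_core);
    assert(err_is_ok(err));
    return notify_cap;
}
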
Example #29
errval_t vspace_mmu_aware_reset(struct vspace_mmu_aware *state,
                                struct capref frame, size_t size)
{
    errval_t err;
    struct vregion *vregion;
    struct capref oldframe;
    void *vbuf;

    // create a copy of the new frame cap; the copy is mapped temporarily below
    err = slot_alloc(&oldframe);
    if (err_is_fail(err)) {
        return err;
    }
    err = cap_copy(oldframe, frame);
    if (err_is_fail(err)) {
        return err;
    }
    err = vspace_map_one_frame_attr_aligned(&vbuf, size, oldframe,
            VREGION_FLAGS_READ_WRITE | VREGION_FLAGS_LARGE, LARGE_PAGE_SIZE,
            NULL, &vregion);
    if (err_is_fail(err)) {
        return err;
    }
    // copy over data to new frame
    genvaddr_t gen_base = vregion_get_base_addr(&state->vregion);
    memcpy(vbuf, (void*)(lvaddr_t)gen_base, state->mapoffset);

    err = vregion_destroy(vregion);
    if (err_is_fail(err)) {
        return err;
    }

    genvaddr_t offset = 0;
    // Unmap the backing frames for [0, mapoffset) in state->vregion
    do {
        err = state->memobj.m.f.unfill(&state->memobj.m, 0, &oldframe,
                &offset);
        if (err_is_fail(err) &&
            err_no(err) != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET)
        {
            return err_push(err, LIB_ERR_MEMOBJ_UNMAP_REGION);
        }
        struct frame_identity fi;
        err = invoke_frame_identify(oldframe, &fi);
        if (err_is_fail(err)) {
            return err;
        }
        // advance past the frame we just unmapped
        offset += (1UL << fi.bits);
        err = cap_destroy(oldframe);
        if (err_is_fail(err)) {
            return err;
        }
    } while(offset < state->mapoffset);

    // Map new frame in
    err = state->memobj.m.f.fill(&state->memobj.m, 0, frame, size);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_FILL);
    }
    err = state->memobj.m.f.pagefault(&state->memobj.m, &state->vregion, 0, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
    }

    state->mapoffset = size;
    return SYS_ERR_OK;
}
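
A plausible caller allocates a larger frame and hands it to
vspace_mmu_aware_reset(), which copies the live data across and remaps the
region. grow_region below is a hypothetical name; frame_alloc is the standard
libbarrelfish allocator call:

// Hypothetical caller: move a tracked region onto a larger frame.
static errval_t grow_region(struct vspace_mmu_aware *state, size_t newsize)
{
    struct capref frame;
    size_t retsize;

    errval_t err = frame_alloc(&frame, newsize, &retsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }
    // copies state->mapoffset bytes of live data and remaps onto frame
    return vspace_mmu_aware_reset(state, frame, retsize);
}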