Example #1
static inline void read_pt_entry(struct capability *pgtable, size_t slot, genpaddr_t *paddr)
{
    assert(type_is_vnode(pgtable->type));
    assert(paddr);

    genpaddr_t gp = get_address(pgtable);
    lpaddr_t lp = gen_phys_to_local_phys(gp);
    lvaddr_t lv = local_phys_to_mem(lp);

    switch (pgtable->type) {
        case ObjType_VNode_AARCH64_l0:
        case ObjType_VNode_AARCH64_l1:
        case ObjType_VNode_AARCH64_l2:
        {
            // table descriptor: read the entry at 'slot'
            union armv8_ttable_entry *e = (union armv8_ttable_entry *) lv + slot;
            *paddr = (genpaddr_t) (e->d.base) << 12;
            return;
        }
        case ObjType_VNode_AARCH64_l3:
        {
            // page descriptor: read the entry at 'slot'
            union armv8_ttable_entry *e = (union armv8_ttable_entry *) lv + slot;
            *paddr = (genpaddr_t) (e->page.base) << 12;
            return;
        }
        default:
            assert(!"Should not get here");
    }
}
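In each case the descriptor's base field holds bits [47:12] of the output address, so shifting it left by 12 rebuilds the 4 KiB-aligned physical address. A standalone sketch of that arithmetic, with made-up values rather than Barrelfish types:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t base  = 0xABCDE;     /* hypothetical 'base' field: address bits [47:12] */
    uint64_t paddr = base << 12;  /* 4 KiB granule: low 12 bits of the address are zero */
    printf("paddr = 0x%" PRIx64 "\n", paddr);
    return 0;
}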
Example #2
struct dcb *spawn_app_init(struct arm_core_data *core_data,
                           const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

    struct dcb *init_dcb = spawn_init_common(name, argc, argv, 0, alloc_phys);

    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
                          core_data->urpc_frame_bits,
                          core_data->urpc_frame_bits, urpc_frame_cte);
    assert(err_is_ok(err));
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
    spawn_init_map(init_l2, INIT_VBASE, MON_URPC_VBASE, urpc_ptr, MON_URPC_SIZE,
                   INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    // elf load the domain
    genvaddr_t entry_point, got_base = 0;
    err = elf_load(EM_ARM, startup_alloc_init, &l2_info,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(local_phys_to_mem(core_data->monitor_binary),
                                       core_data->monitor_binary_size, ".got");
    if (got_shdr) {
        got_base = got_shdr->sh_addr;
    }

    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);
    disp_arm->enabled_save_area.named.r10  = got_base;
    disp_arm->got_base = got_base;

    disp_arm->disabled_save_area.named.pc   = entry_point;
#ifndef __ARM_ARCH_7M__ //the armv7-m profile does not have such a mode field
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
#endif
    disp_arm->disabled_save_area.named.r10  = got_base;
    //disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;

    return init_dcb;
}
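The got_base value ends up in r10 (x10 in the AArch64 variant of Example #3) because the monitor is built as position-independent code: globals are reached through a Global Offset Table whose base lives in a reserved register. A standalone sketch of that indirection, using a one-entry mock GOT rather than the real ABI machinery:

#include <stdio.h>

int global_counter = 7;

int main(void)
{
    void *got[1] = { &global_counter };  /* one-entry mock GOT */
    void **got_base = got;               /* what r10/x10 would hold */

    /* PIC-style load: fetch the global's absolute address from GOT slot 0 */
    int *counter = (int *)got_base[0];
    printf("global via GOT = %d\n", *counter);
    return 0;
}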
Example #3
struct dcb *spawn_app_init(struct armv8_core_data *core_data,
                           const char *name)
{
    errval_t err;

    MSG("spawning '%s' on APP core\n", name);

    /* Only the app core can run this code */
    assert(!cpu_is_bsp());

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
            core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

    struct dcb *init_dcb = spawn_init_common(name, argc, argv, 0, app_alloc_phys,
                                             app_alloc_phys_aligned);

    MSG("creating monitor URPC frame cap\n");
    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);

    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame,
                          core_data->urpc_frame.base,
                          core_data->urpc_frame.length,
                          core_data->urpc_frame.length,
                          my_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
    MSG("mapping URPC frame cap %" PRIxLPADDR "\n", urpc_ptr);
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, MON_URPC_VBASE, urpc_ptr,
                   MON_URPC_SIZE, INIT_PERM_RW);

    struct startup_l3_info l3_info = { init_l3, ARMV8_INIT_VBASE };

    // elf load the domain
    genvaddr_t entry_point, got_base = 0;

    MSG("loading elf '%s' @ %" PRIxLPADDR "\n", name,
        local_phys_to_mem(core_data->monitor_binary.base));

    err = elf_load(EM_AARCH64, startup_alloc_init, &l3_info,
            local_phys_to_mem(core_data->monitor_binary.base),
            core_data->monitor_binary.length, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    struct Elf64_Shdr* got_shdr;
    got_shdr = elf64_find_section_header_name(local_phys_to_mem(core_data->monitor_binary.base),
                                              core_data->monitor_binary.length, ".got");
    if (got_shdr) {
        got_base = got_shdr->sh_addr;
    }

    MSG("init loaded with entry=0x%" PRIxGENVADDR " and GOT=0x%" PRIxGENVADDR "\n",
        entry_point, got_base);

    struct dispatcher_shared_aarch64 *disp_aarch64 =
            get_dispatcher_shared_aarch64(init_dcb->disp);

    disp_aarch64->got_base = got_base;
    disp_aarch64->enabled_save_area.named.x10  = got_base;
    disp_aarch64->disabled_save_area.named.x10  = got_base;

    /* setting entry points */
    disp_aarch64->disabled_save_area.named.pc   = entry_point;
    disp_aarch64->disabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;
    //arch_set_thread_register(INIT_DISPATCHER_VBASE);

    MSG("init dcb set up\n");

    return init_dcb;
}
Example #4
/// Map within a x86_64 non leaf ptable
static errval_t x86_64_non_ptable(struct capability *dest, cslot_t slot,
                                  struct capability *src, uintptr_t flags,
                                  uintptr_t offset, size_t pte_count)
{
    //printf("page_mappings_arch:x86_64_non_ptable\n");
    if (slot >= X86_64_PTABLE_SIZE) { // Within pagetable
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (type_is_vnode(src->type) && pte_count != 1) { // only allow single ptable mappings
        printf("src type and count mismatch\n");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (slot + pte_count > X86_64_PTABLE_SIZE) { // mapping size ok
        printf("mapping size invalid (%zd)\n", pte_count);
        return SYS_ERR_VM_MAP_SIZE;
    }

    size_t page_size = 0;
    paging_x86_64_flags_t flags_large = 0;
    switch (dest->type) {
        case ObjType_VNode_x86_64_pml4:
            if (src->type != ObjType_VNode_x86_64_pdpt) { // Right mapping
                printf("src type invalid\n");
                return SYS_ERR_WRONG_MAPPING;
            }
            if(slot >= X86_64_PML4_BASE(MEMORY_OFFSET)) { // Kernel mapped here
                return SYS_ERR_VNODE_SLOT_RESERVED;
            }
            break;
        case ObjType_VNode_x86_64_pdpt:
            // huge page support
            if (src->type != ObjType_VNode_x86_64_pdir) { // Right mapping
                // TODO: check if the system allows 1GB mappings
                page_size = X86_64_HUGE_PAGE_SIZE;
                // check offset within frame
                genpaddr_t off = offset;

                if (off + pte_count * X86_64_HUGE_PAGE_SIZE > get_size(src)) {
                    return SYS_ERR_FRAME_OFFSET_INVALID;
                }
                // Calculate page access protection flags:
                // Get frame cap rights
                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
                // Add additional arch-specific flags
                flags_large |= X86_64_PTABLE_FLAGS(flags);
                // Unconditionally mark the page present
                flags_large |= X86_64_PTABLE_PRESENT;
            }
            break;
        case ObjType_VNode_x86_64_pdir:
            // superpage support
            if (src->type != ObjType_VNode_x86_64_ptable) { // Right mapping
                page_size = X86_64_LARGE_PAGE_SIZE;

                // check offset within frame
                genpaddr_t off = offset;

                if (off + pte_count * X86_64_LARGE_PAGE_SIZE > get_size(src)) {
                    return SYS_ERR_FRAME_OFFSET_INVALID;
                }
                // Calculate page access protection flags:
                // Get frame cap rights
                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
                // Add additional arch-specific flags
                flags_large |= X86_64_PTABLE_FLAGS(flags);
                // Unconditionally mark the page present
                flags_large |= X86_64_PTABLE_PRESENT;

            }
            break;
        default:
            printf("dest type invalid\n");
            return SYS_ERR_DEST_TYPE_INVALID;
    }

    // Convert destination base address
    genpaddr_t dest_gp   = get_address(dest);
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    // Convert source base address
    genpaddr_t src_gp   = get_address(src);
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);

    // set metadata
    struct cte *src_cte = cte_for_cap(src);
    src_cte->mapping_info.pte = dest_lp + slot * sizeof(union x86_64_ptable_entry);
    src_cte->mapping_info.pte_count = pte_count;
    src_cte->mapping_info.offset = offset;

    cslot_t last_slot = slot + pte_count;
    for (; slot < last_slot; slot++, offset += page_size) {
        // Destination
        union x86_64_pdir_entry *entry = (union x86_64_pdir_entry *)dest_lv + slot;

        if (X86_64_IS_PRESENT(entry)) {
            // cleanup mapping info
            // TODO: cleanup already mapped pages
            memset(&src_cte->mapping_info, 0, sizeof(struct mapping_info));
            printf("slot in use\n");
            return SYS_ERR_VNODE_SLOT_INUSE;
        }

        // determine if we map a large/huge page or a normal entry
        if (page_size == X86_64_LARGE_PAGE_SIZE) {
            // a large page is mapped
            paging_x86_64_map_large((union x86_64_ptable_entry *)entry,
                                    src_lp + offset, flags_large);
        } else if (page_size == X86_64_HUGE_PAGE_SIZE) {
            // a huge page is mapped
            paging_x86_64_map_huge((union x86_64_ptable_entry *)entry,
                                   src_lp + offset, flags_large);
        } else {
            // a normal paging structure entry is mapped
            paging_x86_64_map_table(entry, src_lp + offset);
        }
    }

    return SYS_ERR_OK;
}
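Both the large-page and huge-page branches derive their flags the same way: start from what the frame capability's rights permit, mask with the caller-requested access bits, OR in the arch-specific attribute bits, and force PRESENT. A standalone sketch of that pattern; the macro names and bit positions here are illustrative, not the real x86-64 definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; not the real x86-64 page-table flags. */
#define PTE_PRESENT 0x001u
#define PTE_WRITE   0x002u
#define PTE_NOCACHE 0x010u

/* cap_flags: what the frame capability's rights allow
 * requested: the access bits the caller asked for
 * arch_attrs: extra arch-specific attribute bits to OR in */
static uint32_t derive_flags(uint32_t cap_flags, uint32_t requested,
                             uint32_t arch_attrs)
{
    uint32_t f = cap_flags & requested; /* mask: can't exceed the cap's rights */
    f |= arch_attrs;                    /* e.g. cache-disable bits */
    f |= PTE_PRESENT;                   /* unconditionally mark present */
    return f;
}

int main(void)
{
    uint32_t f = derive_flags(PTE_WRITE, PTE_WRITE, PTE_NOCACHE);
    printf("flags = 0x%03x\n", f); /* 0x013: write, nocache, present */
    return 0;
}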
Example #5
/// Map within a x86_64 ptable
static errval_t x86_64_ptable(struct capability *dest, cslot_t slot,
                              struct capability *src, uintptr_t mflags,
                              uintptr_t offset, size_t pte_count)
{
    //printf("page_mappings_arch:x86_64_ptable\n");
    if (slot >= X86_64_PTABLE_SIZE) { // Within pagetable
        printf("    vnode_invalid\n");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (slot + pte_count > X86_64_PTABLE_SIZE) { // mapping size ok
        printf("mapping size invalid (%zd)\n", pte_count);
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_Frame &&
        src->type != ObjType_DevFrame) { // Right mapping
        printf("src type invalid\n");
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    genpaddr_t off = offset;
    if (off + pte_count * X86_64_BASE_PAGE_SIZE > get_size(src)) {
        printf("frame offset invalid\n");
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }


    /* Calculate page access protection flags */
    // Get frame cap rights
    paging_x86_64_flags_t flags =
        paging_x86_64_cap_to_page_flags(src->rights);
    // Mask with provided access rights mask
    flags = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(mflags));
    // Add additional arch-specific flags
    flags |= X86_64_PTABLE_FLAGS(mflags);
    // Unconditionally mark the page present
    flags |= X86_64_PTABLE_PRESENT;

    // Convert destination base address
    genpaddr_t dest_gp   = get_address(dest);
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    // Convert source base address
    genpaddr_t src_gp   = get_address(src);
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    // Set metadata
    struct cte *src_cte = cte_for_cap(src);
    src_cte->mapping_info.pte = dest_lp + slot * sizeof(union x86_64_ptable_entry);
    src_cte->mapping_info.pte_count = pte_count;
    src_cte->mapping_info.offset = offset;

    cslot_t last_slot = slot + pte_count;
    for (; slot < last_slot; slot++, offset += X86_64_BASE_PAGE_SIZE) {
        union x86_64_ptable_entry *entry =
            (union x86_64_ptable_entry *)dest_lv + slot;

        /* FIXME: Flush TLB if the page is already present
         * in the meantime, since we don't do this, we just fail to avoid
         * ever reusing a VA mapping */
        if (X86_64_IS_PRESENT(entry)) {
            // TODO: cleanup already mapped pages
            memset(&src_cte->mapping_info, 0, sizeof(struct mapping_info));
            printk(LOG_WARN, "Trying to remap an already-present page is NYI, but "
                   "this is most likely a user-space bug!\n");
            return SYS_ERR_VNODE_SLOT_INUSE;
        }

        // Carry out the page mapping
        paging_x86_64_map(entry, src_lp + offset, flags);
    }

    return SYS_ERR_OK;
}
Example #6
/// Map within a x86_32 pdir
static errval_t x86_32_pdir(struct capability *dest, cslot_t slot,
                            struct capability * src, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    //printf("x86_32_pdir\n");
    if (slot >= X86_32_PTABLE_SIZE) { // Slot within page table
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (slot + pte_count > X86_32_PTABLE_SIZE) {
        // check that mapping fits page directory
        return SYS_ERR_VM_MAP_SIZE;
    }

#ifndef CONFIG_PAE
    if(slot >= X86_32_PDIR_BASE(X86_32_MEMORY_OFFSET)) { // Kernel mapped here
        return SYS_ERR_VNODE_SLOT_RESERVED;
    }
#endif

    // large page code
    if(src->type == ObjType_Frame || src->type == ObjType_DevFrame)
    {
        cslot_t last_slot = slot + pte_count;

        // check offset within frame
        if (offset + pte_count * X86_32_LARGE_PAGE_SIZE > get_size(src)) {
            return SYS_ERR_FRAME_OFFSET_INVALID;
        }

        /* Calculate page access protection flags */
        // Get frame cap rights
        paging_x86_32_flags_t flags_large =
            paging_x86_32_cap_to_page_flags(src->rights);
        // Mask with provided access rights mask
        flags_large = paging_x86_32_mask_attrs(flags_large, X86_32_PTABLE_ACCESS(flags));
        // Add additional arch-specific flags
        flags_large |= X86_32_PTABLE_FLAGS(flags);
        // Unconditionally mark the page present
        flags_large |= X86_32_PTABLE_PRESENT;

        // Convert destination base address
        genpaddr_t dest_gp   = get_address(dest);
        lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
        lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
        // Convert source base address
        genpaddr_t src_gp   = get_address(src);
        lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
        // Set metadata
        create_mapping_cap(mapping_cte, src,
                           dest_lp + slot * sizeof(union x86_32_ptable_entry),
                           offset,
                           pte_count);

        for (; slot < last_slot; slot++, offset += X86_32_LARGE_PAGE_SIZE) {
            union x86_32_ptable_entry *entry =
                (union x86_32_ptable_entry *)dest_lv + slot;

            /* FIXME: Flush TLB if the page is already present
             * in the meantime, since we don't do this, we just assert that
             * we never reuse a VA mapping */
            if (X86_32_IS_PRESENT(entry)) {
                printf("Trying to map into an already present page NYI.\n");
                return SYS_ERR_VNODE_SLOT_INUSE;
            }

            // Carry out the page mapping
            paging_x86_32_map_large(entry, src_lp + offset, flags_large);
        }

        return SYS_ERR_OK;
    }

    if (src->type != ObjType_VNode_x86_32_ptable) { // Right mapping
        return SYS_ERR_WRONG_MAPPING;
    }

    // Destination
    genpaddr_t dest_gp   = dest->u.vnode_x86_32_pdir.base;
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    union x86_32_pdir_entry *entry =
        (union x86_32_pdir_entry *)dest_lv + slot;

    // Set metadata
    create_mapping_cap(mapping_cte, src,
                       dest_lp + slot * sizeof(union x86_32_pdir_entry),
                       offset,
                       pte_count);

    // Source
    // XXX: offset is ignored
    genpaddr_t src_gp   = src->u.vnode_x86_32_pdir.base;
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    paging_x86_32_map_table(entry, src_lp);

    return SYS_ERR_OK;
}
Example #7
/// Map within a x86_32 ptable
static errval_t x86_32_ptable(struct capability *dest, cslot_t slot,
                              struct capability * src, uintptr_t uflags,
                              uintptr_t offset, uintptr_t pte_count,
                              struct cte *mapping_cte)
{
    //printf("x86_32_ptable\n");
    if (slot >= X86_32_PTABLE_SIZE) { // Slot within page table
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    cslot_t last_slot = slot + pte_count;

    if (last_slot > X86_32_PTABLE_SIZE) {
        printf("slot = %"PRIuCSLOT", last_slot = %"PRIuCSLOT", PTABLE_SIZE = %d\n", slot, last_slot, X86_32_PTABLE_SIZE);
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_Frame &&
        src->type != ObjType_DevFrame) { // Right mapping
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    if (offset + pte_count * X86_32_BASE_PAGE_SIZE > get_size(src)) {
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    /* Calculate page access protection flags */
    // Get frame cap rights
    paging_x86_32_flags_t flags =
        paging_x86_32_cap_to_page_flags(src->rights);
    // Mask with provided access rights mask
    flags = paging_x86_32_mask_attrs(flags, X86_32_PTABLE_ACCESS(uflags));
    // Add additional arch-specific flags
    flags |= X86_32_PTABLE_FLAGS(uflags);
    // Unconditionally mark the page present
    flags |= X86_32_PTABLE_PRESENT;

    // Convert destination base address
    genpaddr_t dest_gp   = get_address(dest);
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    // Convert source base address
    genpaddr_t src_gp   = get_address(src);
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    // Set metadata
    create_mapping_cap(mapping_cte, src,
                       dest_lp + slot * sizeof(union x86_32_ptable_entry),
                       offset,
                       pte_count);

    for (; slot < last_slot; slot++, offset += X86_32_BASE_PAGE_SIZE) {
        union x86_32_ptable_entry *entry =
            (union x86_32_ptable_entry *)dest_lv + slot;

        /* FIXME: Flush TLB if the page is already present
         * in the meantime, since we don't do this, we just assert that
         * we never reuse a VA mapping */
        if (X86_32_IS_PRESENT(entry)) {
            panic("Trying to map into an already present page NYI.");
        }

        // Carry out the page mapping
        paging_x86_32_map(entry, src_lp + offset, flags);
    }

    return SYS_ERR_OK;
}
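Every mapper above guards the source frame with the same arithmetic: pte_count pages of page_size bytes starting at offset must end at or before the frame's size. A standalone sketch of the check with hypothetical numbers:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A mapping of pte_count pages of page_size bytes starting at 'offset'
 * must end at or before the end of the frame. */
static bool mapping_fits(uint64_t offset, size_t pte_count,
                         uint64_t page_size, uint64_t frame_size)
{
    return offset + pte_count * page_size <= frame_size;
}

int main(void)
{
    /* 16 pages of 4 KiB at offset 0 in a 64 KiB frame: fits exactly. */
    printf("%d\n", mapping_fits(0, 16, 4096, 65536));
    /* a 17th page would overrun the frame */
    printf("%d\n", mapping_fits(0, 17, 4096, 65536));
    return 0;
}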
Example #8
static errval_t
caps_map_l3(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    // VMSAv8-64 translation tables have 512 8-byte entries per 4 KiB table.
    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        panic("oops: slot >= 512");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
        panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
        return SYS_ERR_WRONG_MAPPING;
    }

    // check that the requested range lies within the frame
    if ((offset + pte_count * BASE_PAGE_SIZE > get_size(src)) ||
        ((offset % BASE_PAGE_SIZE) != 0)) {
        panic("oops: frame offset invalid");
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    // check mapping does not overlap leaf page table
    if (slot + pte_count > VMSAv8_64_PTABLE_NUM_ENTRIES ) {
        return SYS_ERR_VM_MAP_SIZE;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry *entry = (union armv8_ttable_entry *)dest_lvaddr + slot;
    if (entry->page.valid) {
        panic("Remapping valid page.");
    }

    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
        panic("Invalid target");
    }

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    for (uintptr_t i = 0; i < pte_count; i++) {
        entry->raw = 0;

        entry->page.valid = 1;
        entry->page.mb1 = 1;
        paging_set_flags(entry, kpi_paging_flags);
        entry->page.base = (src_lpaddr + i * BASE_PAGE_SIZE) >> 12;

        debug(SUBSYS_PAGING, "L3 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx64"\n",
              dest_lvaddr, slot, entry, entry->raw);

        entry++;
    }

    // Flush TLB if remapping.
    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}
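For orientation, a simplified standalone sketch of the page descriptor the loop above writes. The field widths are inferred from the usage here (valid and mb1 in the low bits, base holding address bits [47:12]); the real armv8_ttable_entry carries many more attribute fields:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified sketch of a VMSAv8-64 L3 page descriptor; illustrative only. */
union ttable_entry_sketch {
    uint64_t raw;
    struct {
        uint64_t valid : 1;   /* descriptor is valid */
        uint64_t mb1   : 1;   /* must be 1: marks a page (not a block) at L3 */
        uint64_t attrs : 10;  /* placeholder for the lower attribute bits */
        uint64_t base  : 36;  /* output address bits [47:12] */
        uint64_t upper : 16;  /* placeholder for the upper attribute bits */
    } page;
};

int main(void)
{
    union ttable_entry_sketch e = { .raw = 0 };
    e.page.valid = 1;
    e.page.mb1   = 1;
    e.page.base  = UINT64_C(0x80000000) >> 12;  /* map physical 0x80000000 */
    printf("raw descriptor = 0x%016" PRIx64 "\n", e.raw);
    return 0;
}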
Example #9
static errval_t
caps_map_l2(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    //
    // Note:
    //
    // We have chicken-and-egg problem in initializing resources so
    // instead of treating an L3 table it's actual 1K size, we treat
    // it as being 4K. As a result when we map an "L3" table we actually
    // map a page of memory as if it is 4 consecutive L3 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //
    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        panic("oops: slot id >= 512");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n",(size_t) pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_AARCH64_l3) {
        panic("oops: l2 wrong src type");
        return SYS_ERR_WRONG_MAPPING;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry* entry = (union armv8_ttable_entry*) dest_lvaddr + slot;

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);

    assert(offset == 0);
    assert(aligned(src_lpaddr, 1u << 12));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 4096));

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    entry->raw = 0;
    entry->d.valid = 1;
    entry->d.mb1 = 1;
    entry->d.base = (src_lpaddr) >> 12;
    debug(SUBSYS_PAGING, "L2 mapping %"PRIuCSLOT". @%p = %08"PRIx64"\n",
          slot, entry, entry->raw);

    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}
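The asserts above encode two safety conditions: the L3 table being installed must be 4 KiB aligned, and it must not lie inside the 4 KiB L2 table being written. A standalone sketch of both checks with made-up addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Power-of-two alignment test, as in the aligned() assert above. */
static bool is_aligned(uint64_t addr, uint64_t alignment)
{
    return (addr & (alignment - 1)) == 0;
}

int main(void)
{
    uint64_t src  = 0x40003000;  /* hypothetical L3 table address */
    uint64_t dest = 0x40001000;  /* hypothetical L2 table address */

    printf("src 4 KiB aligned: %d\n", is_aligned(src, UINT64_C(1) << 12));
    /* the installed table must lie outside the 4 KiB table being written */
    printf("no overlap: %d\n", src < dest || src >= dest + 4096);
    return 0;
}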
Example #10
static errval_t
caps_map_l1(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count)
{
    //
    // Note:
    //
    // We have chicken-and-egg problem in initializing resources so
    // instead of treating an L2 table it's actual 1K size, we treat
    // it as being 4K. As a result when we map an "L2" table we actually
    // map a page of memory as if it is 4 consecutive L2 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //
    const int ARM_L1_SCALE = 4;

    if (slot >= 1024) {
        printf("slot = %"PRIuCSLOT"\n",slot);
        panic("oops: slot id >= 1024");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n",(size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_ARM_l2) {
        //large page mapping goes here
        printf("kernel large page\n");
        //panic("oops: wrong src type");
        assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

        // ARM L1 has 4K entries, but we treat it as if it had 1K
        if (slot >= (256 * 4)) {
            panic("oops: slot >= (256 * 4)");
            return SYS_ERR_VNODE_SLOT_INVALID;
        }

        if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
            panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
            return SYS_ERR_WRONG_MAPPING;
        }

        // check offset within frame
        if ((offset + BYTES_PER_SECTION > get_size(src)) ||
            ((offset % BYTES_PER_SECTION) != 0)) {
            panic("oops: frame offset invalid");
            return SYS_ERR_FRAME_OFFSET_INVALID;
        }

        // check mapping does not overlap leaf page table
        if (slot + pte_count > (256 * 4)) {
            return SYS_ERR_VM_MAP_SIZE;
        }

        // Destination
        lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
        lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

        union arm_l1_entry* entry = (union arm_l1_entry*)dest_lvaddr + slot;
        if (entry->invalid.type != L1_TYPE_INVALID_ENTRY) {
            panic("Remapping valid page.");
        }

        lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
        if ((src_lpaddr & (LARGE_PAGE_SIZE - 1))) {
            panic("Invalid target");
        }

        struct cte *src_cte = cte_for_cap(src);
        src_cte->mapping_info.pte_count = pte_count;
        src_cte->mapping_info.pte = dest_lpaddr;
        src_cte->mapping_info.offset = offset;

        for (int i = 0; i < pte_count; i++) {
            entry->raw = 0;

            entry->section.type = L1_TYPE_SECTION_ENTRY;
            entry->section.bufferable = 1;
            entry->section.cacheable = (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE) ? 0 : 1;
            entry->section.ap10 = (kpi_paging_flags & KPI_PAGING_FLAGS_READ) ? 2 : 0;
            entry->section.ap10 |= (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE) ? 3 : 0;
            entry->section.ap2 = 0;
            entry->section.base_address = (src_lpaddr + i * BYTES_PER_SECTION) >> 12;

            // log the entry just written, then advance
            debug(SUBSYS_PAGING, "L1 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx32"\n",
                  dest_lvaddr, slot, entry, entry->raw);

            entry++;
        }

        // Flush TLB if remapping.
        cp15_invalidate_tlb();
        return SYS_ERR_OK;
    }
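The excerpt is truncated after the large-page branch; the regular path, which installs an L2 table into ARM_L1_SCALE consecutive L1 entries, is missing. A hedged reconstruction of roughly how that tail looks, modeled on the large-page branch above and the AArch64 caps_map_l2 in Example #9; the field names and shift amounts are assumptions, not verbatim source:

    // --- sketch of the missing tail; details assumed, not verbatim source ---

    // Destination: each "L2 table" cap covers ARM_L1_SCALE consecutive L1 slots
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);
    union arm_l1_entry* entry = (union arm_l1_entry*)dest_lvaddr + slot * ARM_L1_SCALE;

    // Source: one 4 KiB page holding four 1 KiB L2 tables
    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src));
    assert(offset == 0);

    struct cte *src_cte = cte_for_cap(src);
    src_cte->mapping_info.pte_count = pte_count;
    src_cte->mapping_info.pte = dest_lpaddr
                              + slot * ARM_L1_SCALE * sizeof(union arm_l1_entry);
    src_cte->mapping_info.offset = offset;

    for (int i = 0; i < ARM_L1_SCALE; i++, entry++) {
        entry->raw = 0;
        entry->page_table.type = L1_TYPE_PAGE_TABLE_ENTRY;
        entry->page_table.base_address = (src_lpaddr + i * 1024) >> 10; // assumed encoding
    }

    cp15_invalidate_tlb();
    return SYS_ERR_OK;
}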
Example #11
/* FIXME: lots of missing argument checks in this function */
struct sysret
sys_dispatcher_setup(struct capability *to, capaddr_t cptr, int depth,
                     capaddr_t vptr, capaddr_t dptr, bool run, capaddr_t odptr)
{
    errval_t err = SYS_ERR_OK;
    assert(to->type == ObjType_Dispatcher);
    struct dcb *dcb = to->u.dispatcher.dcb;

    lpaddr_t lpaddr;

    /* 1. set cspace root */
    if (cptr != CPTR_NULL) {
        struct cte *root;
        err = caps_lookup_slot(&dcb_current->cspace.cap, cptr, depth,
                               &root, CAPRIGHTS_READ);
        if (err_is_fail(err)) {
            return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_ROOT));
        }
        if (root->cap.type != ObjType_CNode) {
            return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_INVALID));
        }
        err = caps_copy_to_cte(&dcb->cspace, root, false, 0, 0);
        if (err_is_fail(err)) {
            return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_ROOT));
        }
    }

    /* 2. set vspace root */
    if (vptr != CPTR_NULL) {
        struct capability *vroot;
        err = caps_lookup_cap(&dcb_current->cspace.cap, vptr, CPTR_BITS,
                              &vroot, CAPRIGHTS_WRITE);
        if (err_is_fail(err)) {
            return SYSRET(err_push(err, SYS_ERR_DISP_VSPACE_ROOT));
        }

        // Insert as dispatcher's VSpace root
        switch(vroot->type) {
        case ObjType_VNode_x86_64_pml4:
            dcb->vspace =
                (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_x86_64_pml4.base);
            break;
#ifdef CONFIG_PAE
        case ObjType_VNode_x86_32_pdpt:
            dcb->vspace =
                (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_x86_32_pdpt.base);
            break;
#else
        case ObjType_VNode_x86_32_pdir:
            dcb->vspace =
                (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_x86_32_pdir.base);
            break;
#endif
        case ObjType_VNode_ARM_l1:
            dcb->vspace =
                (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_arm_l1.base);
            break;

        default:
            return SYSRET(err_push(err, SYS_ERR_DISP_VSPACE_INVALID));
        }
    }

    /* 3. set dispatcher frame pointer */
    if (dptr != CPTR_NULL) {
        struct cte *dispcte;
        err = caps_lookup_slot(&dcb_current->cspace.cap, dptr, CPTR_BITS,
                               &dispcte, CAPRIGHTS_WRITE);
        if (err_is_fail(err)) {
            return SYSRET(err_push(err, SYS_ERR_DISP_FRAME));
        }
        struct capability *dispcap = &dispcte->cap;
        if (dispcap->type != ObjType_Frame) {
            return SYSRET(err_push(err, SYS_ERR_DISP_FRAME_INVALID));
        }

        /* FIXME: check rights, check size */

        lpaddr = gen_phys_to_local_phys(dispcap->u.frame.base);
        dcb->disp = local_phys_to_mem(lpaddr);
        // Copy the cap to dcb also
        err = caps_copy_to_cte(&dcb->disp_cte, dispcte, false, 0, 0);
        // If copy fails, something wrong in kernel
        assert(err_is_ok(err));
    }

    /* 5. Make runnable if desired -- Set pointer to ipi_data */
    if (run) {
        if (dcb->vspace == 0 ||
            (!dcb->is_vm_guest &&
             (dcb->disp == 0 || dcb->cspace.cap.type != ObjType_CNode))) {
            return SYSRET(err_push(err, SYS_ERR_DISP_NOT_RUNNABLE));
        }

        // XXX: dispatchers run disabled the first time they start
        dcb->disabled = 1;
        //printf("DCB: %p %.*s\n", dcb, DISP_NAME_LEN, dcb->disp->name);
        make_runnable(dcb);
    }

    /* 6. Copy domain ID off given dispatcher */
    if(odptr != CPTR_NULL) {
        struct capability *odisp;
        err = caps_lookup_cap(&dcb_current->cspace.cap, odptr, CPTR_BITS,
                              &odisp, CAPRIGHTS_READ_WRITE);
        if (err_is_fail(err)) {
            return SYSRET(err_push(err, SYS_ERR_DISP_OCAP_LOOKUP));
        }
        dcb->domain_id = odisp->u.dispatcher.dcb->domain_id;
    }

    /* 7. (HACK) Set current core id */
    {
        struct dispatcher_shared_generic *disp =
            get_dispatcher_shared_generic(dcb->disp);
        disp->curr_core_id = my_core_id;
    }

    if(!dcb->is_vm_guest) {
        struct dispatcher_shared_generic *disp =
                    get_dispatcher_shared_generic(dcb->disp);
        err = trace_new_application(disp->name, (uintptr_t) dcb);

        if (err == TRACE_ERR_NO_BUFFER) {
            // Try to use the boot buffer.
            trace_new_boot_application(disp->name, (uintptr_t) dcb);
        }
    }

    return SYSRET(SYS_ERR_OK);
}
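Each failure path wraps the underlying error with a call-site code via err_push before handing it to SYSRET, so the caller sees both where and why the operation failed. A standalone sketch of the error-stacking idea; the encoding is invented for illustration and differs from Barrelfish's real errval_t layout:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t errval_t;

/* Push an 8-bit site code onto the error value (invented encoding). */
static errval_t err_push_sketch(errval_t err, uint8_t code)
{
    return (err << 8) | code;
}

int main(void)
{
    errval_t err = 0x2a;              /* hypothetical lookup failure */
    err = err_push_sketch(err, 0x07); /* wrap with a caller-site code */
    printf("stacked error = 0x%" PRIx64 "\n", err);
    return 0;
}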
Example #12
errval_t unmap_capability(struct cte *mem)
{
    errval_t err;

    TRACE_CAP_MSG("unmapping", mem);

    genvaddr_t vaddr = 0;
    bool single_page_flush = false;
    int mapping_count = 0, unmap_count = 0;
    genpaddr_t faddr = get_address(&mem->cap);

    // iterate over all mappings associated with 'mem' and unmap them
    struct cte *next = mem;
    struct cte *to_delete = NULL;

    while ((next = mdb_successor(next)) && get_address(&next->cap) == faddr) {
        TRACE_CAP_MSG("looking at", next);
        if (next->cap.type == get_mapping_type(mem->cap.type) &&
            next->cap.u.frame_mapping.cap == &mem->cap)
        {
            TRACE_CAP_MSG("cleaning up mapping", next);
            mapping_count++;

            // do unmap
            struct Frame_Mapping *mapping = &next->cap.u.frame_mapping;
            struct cte *pgtable = mapping->ptable;
            if (!pgtable) {
                debug(SUBSYS_PAGING, "mapping->ptable == 0: just deleting mapping\n");
                // no page table to modify, so only the mapping cap is deleted
                goto delete_mapping;
            }
            if (!type_is_vnode(pgtable->cap.type)) {
                debug(SUBSYS_PAGING,
                      "mapping->ptable.type not vnode (%d): just deleting mapping\n",
                      mapping->ptable->cap.type);
                // no valid page table, so only the mapping cap is deleted
                goto delete_mapping;
            }

            lpaddr_t ptable_lp = gen_phys_to_local_phys(get_address(&pgtable->cap));
            lvaddr_t ptable_lv = local_phys_to_mem(ptable_lp);
            cslot_t slot = mapping->entry;

            // unmap
            do_unmap(ptable_lv, slot, mapping->pte_count);

            unmap_count++;

            // TLB flush?
            if (unmap_count == 1) {
                err = compile_vaddr(pgtable, slot, &vaddr);
                if (err_is_ok(err) && mapping->pte_count == 1) {
                    single_page_flush = true;
                }
            }

delete_mapping:
            assert(!next->delete_node.next);
            // mark mapping cap for delete: cannot do delete here as it messes
            // up mdb_successor()
            next->delete_node.next = to_delete;
            to_delete = next;
        }
    }

    // delete mapping caps
    while (to_delete) {
        next = to_delete->delete_node.next;
        err = caps_delete(to_delete);
        if (err_is_fail(err)) {
            printk(LOG_NOTE, "caps_delete: %"PRIuERRV"\n", err);
        }
        to_delete = next;
    }

    TRACE_CAP_MSGF(mem, "unmapped %d/%d instances", unmap_count, mapping_count);

    // do TLB flush
    if (single_page_flush) {
        do_one_tlb_flush(vaddr);
    } else {
        do_full_tlb_flush();
    }

    return SYS_ERR_OK;
}
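Deleting a mapping cap while the loop is still walking the mapping database would break mdb_successor(), so matches are first threaded onto an intrusive list through delete_node.next and deleted in a second pass. A standalone sketch of that two-phase pattern:

#include <stddef.h>
#include <stdio.h>

struct node {
    int id;
    struct node *next; /* plays the role of delete_node.next */
};

int main(void)
{
    struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
    struct node *all[] = { &a, &b, &c };
    struct node *to_delete = NULL;

    /* Phase 1: while iterating, prepend matches to an intrusive list
     * instead of deleting them in place. */
    for (size_t i = 0; i < 3; i++) {
        if (all[i]->id != 2) { /* pretend ids 1 and 3 are the mappings */
            all[i]->next = to_delete;
            to_delete = all[i];
        }
    }

    /* Phase 2: walk the collected list and delete safely. */
    while (to_delete) {
        struct node *next = to_delete->next;
        printf("deleting node %d\n", to_delete->id);
        to_delete = next;
    }
    return 0;
}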
Example #13
struct dcb *spawn_app_init(struct x86_core_data *core_data,
                           const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

#ifdef __scc__
    char urpc_frame_base_char[30];
    snprintf(urpc_frame_base_char, sizeof(urpc_frame_base_char),
             "frame=%" PRIuGENPADDR, core_data->urpc_frame_base);
    argv[argc++] = urpc_frame_base_char;
#endif

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name, argc, argv,
                                             0, alloc_phys);

    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
                          core_data->urpc_frame_bits,
                          core_data->urpc_frame_bits, core_data->src_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < MON_URPC_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(MON_URPC_BASE) + i],
                   urpc_ptr + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }

    // elf load the domain
    genvaddr_t entry_point;
    err = elf_load(EM_386, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);
    init_disp_x86_32->disabled_save_area.eip = entry_point;

    return init_dcb;
}