Example #1
static void handle_fault(L4_Word_t faddr, L4_Word_t fip, L4_MapItem_t *map)
{
	struct drop_param *param = get_ctx();
	L4_MsgTag_t tag = muidl_get_tag();
	int rwx = tag.X.label & 0x000f;
#if 0
	L4_ThreadId_t from = muidl_get_sender();
	diag("drop_pager: pf in %lu:%lu at %#lx, ip %#lx",
		L4_ThreadNo(from), L4_Version(from), faddr, fip);
#endif
	param->log_top = (param->log_top + 1) % LOG_SIZE;
	param->log[param->log_top] = L4_FpageLog2(faddr, 12);
	L4_Set_Rights(&param->log[param->log_top], rwx);

	int dpos = param->log_top - param->keep;
	if(dpos < 0) dpos += LOG_SIZE;
	assert(dpos >= 0 && dpos < LOG_SIZE);
	L4_Fpage_t drop = param->log[dpos];
	if(!L4_IsNilFpage(drop)
		&& L4_Address(drop) != (faddr & ~PAGE_MASK))
	{
#if 0
		diag("flushing %#lx:%#lx (dpos %d)",
			L4_Address(drop), L4_Size(drop), dpos);
#endif
		L4_Set_Rights(&drop, L4_FullyAccessible);
		L4_FlushFpage(drop);
	}

	/* pass it on. */
	L4_LoadBR(0, L4_CompleteAddressSpace.raw);
	L4_LoadMR(0, (L4_MsgTag_t){ .X.label = 0xffe0 | rwx,
		.X.u = 2 }.raw);
	/* assumed continuation: the pf protocol carries the fault address in
	 * MR1 and the faulting IP in MR2; forward the fault to our own pager
	 * and relay the reply's map item.
	 */
	L4_LoadMR(1, faddr);
	L4_LoadMR(2, fip);
	tag = L4_Call(L4_Pager());
	if(L4_IpcFailed(tag)) {
		diag("drop_pager: chained pf call failed, ec %#lx", L4_ErrorCode());
		return;
	}
	L4_StoreMRs(1, 2, map->raw);
}
Example #2
static L4_Word_t thread_prepare_stack(AddrSpace_t *as, L4_Word_t sp,
        char *cmdline, int n, char **paths, HpfCapability *caps)
{
    L4_Word_t size;
    int i;

    assert(as != NULL);

    /* nothing to do in this case */
    if (cmdline == NULL) return 0;

    /* we need room for four header words, the cmdline, and the
     * path/capability pairs added below */
    size = strlen(cmdline) + 1 + 4 * sizeof(uint8_t *);

    size += n * sizeof(HpfCapability);
    for (i = 0; i < n; i++) {
        size += strlen(paths[i]) + 1;
    }

    if (size < PAGE_SIZE) size = PAGE_SIZE;
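    /* intlog2() is assumed here to round the byte count up to the next
     * power of two: its result is used below both as an fpage size and as
     * a byte offset. */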
    size = intlog2(size);

    L4_Fpage_t vp = L4_Fpage(sp - size, size);
    L4_Fpage_t fp = address_space_request_page(as, vp);
    if (L4_IsNilFpage(fp)) {
        debug("thread_prepare_stack: can't setup stack!\n");
        return 0;
    }

    uint8_t *start = (uint8_t *) L4_Address(fp);
    uint32_t *tmp = (uint32_t *) start;

    tmp[0] = L4_Address(vp) + 4 * sizeof(uint8_t *); /* pointer to cmdline */
    tmp[1] = n; /* number of path/capability pairs */
    tmp[2] = L4_Address(vp) + 4 * sizeof(uint8_t *) + strlen(cmdline) + 1; /* pointer to array of paths */
    tmp[3] = L4_Address(vp) + size - n * sizeof(HpfCapability); /* pointer to array of caps */

    strcpy((char *) (start + 4 * sizeof(uint8_t *)), cmdline);

    memcpy(start + size - n * sizeof(HpfCapability), caps, n * sizeof(HpfCapability));
    start += 4 * sizeof(uint8_t *) + strlen(cmdline) + 1;
    for (i = 0; i < n; i++) {
        char *x = paths[i];
        while (*x) *start++ = *x++;
        *start++ = 0;
    }

    return size;
}
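Seen from the new thread, the four header words written above form this
layout (a sketch: the struct and field names are invented; the offsets come
from the code):

/* what the started thread finds at L4_Address(vp) */
struct stack_args {
    uint32_t cmdline;   /* [0] pointer to the NUL-terminated cmdline */
    uint32_t n;         /* [1] number of path/capability pairs */
    uint32_t paths;     /* [2] pointer to n consecutive NUL-terminated paths */
    uint32_t caps;      /* [3] pointer to the HpfCapability array at the top */
};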
Example #3
void isr_exn_ud_bottom(struct x86_exregs *regs)
{
	/* see if it's a LOCK NOP. (this is why the "syscall" is so slow.) */

	/* NOTE: could extend the kernel data segment to the full address space,
	 * and wrap the user-space pointer. that'd remove the farting around with
	 * the mapping database, page tables, and supervisor space.
	 */
	struct thread *current = get_current_thread();
	uint8_t buf[2];
	size_t n = space_memcpy_from(current->space, buf, regs->eip, 2);
	if(n < 2) {
		panic("can't read from #UD eip? what.");
	}
	if(buf[0] == 0xf0 && buf[1] == 0x90) {
		/* it is L4_KernelInterface().
		 * TODO: proper values
		 */
		regs->eip += 2;
		regs->eax = L4_Address(current->space->kip_area);
		/* TODO: replace these with proper KIP accessors */
		regs->ecx = *(L4_Word_t *)(kip_mem + 0x04);		/* API VERSION */
		regs->edx = *(L4_Word_t *)(kip_mem + 0x08);		/* API FLAGS */
		/* id = 23 (because 2 + 3 = 5); subid = 17
		 * TODO: get proper values at some point.
		 */
		regs->esi = (23 << 24) | (17 << 16);	/* KERNEL ID */
	} else {
		printf("#UD at eip 0x%lx, esp 0x%lx\n", regs->eip, regs->esp);
		/* TODO: pop an "invalid opcode" exception. */
		thread_halt(current);
		assert(current->status == TS_STOPPED);
		return_to_scheduler();
	}
}
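The user-side entry point behind this path is L4_KernelInterface(); a
minimal sketch of it, assuming GCC inline assembly on x86, mirrors the
register convention the handler fills in above:

static inline void *kernel_interface(L4_Word_t *api_version,
	L4_Word_t *api_flags, L4_Word_t *kernel_id)
{
	void *kip;
	/* "lock; nop" is the reserved KernelInterface sequence: it raises #UD,
	 * and the handler above writes the results into these registers. */
	asm volatile ("lock; nop"
		: "=a" (kip), "=c" (*api_version), "=d" (*api_flags),
		  "=S" (*kernel_id));
	return kip;
}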
Example #4
void
sys$sigma0_map_fpage(L4_Fpage_t virt_page, L4_Fpage_t phys_page,
        unsigned int priv)
{
    L4_ThreadId_t           tid;
    L4_MsgTag_t             tag;
    L4_Msg_t                msg;
    L4_MapItem_t            map;

    // Find Pager's ID
    tid = L4_Pager();
    L4_Set_Rights(&phys_page, priv);
    L4_Accept(L4_MapGrantItems(virt_page));
    L4_MsgClear(&msg);
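    /* sigma0 request format: MR1 = requested fpage, MR2 = requested
     * memory attributes (0 = architecture default) */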
    L4_MsgAppendWord(&msg, (L4_Word_t) phys_page.raw);
    L4_MsgAppendWord(&msg, (L4_Word_t) 0);
    L4_Set_Label(&msg.tag, SIGMA0_REQUEST_LABEL);
    L4_MsgLoad(&msg);

    tag = L4_Call(tid);

    PANIC(L4_IpcFailed(tag), notice(IPC_F_FAILED "IPC failed (error %ld: %s)\n",
            L4_ErrorCode(), L4_ErrorCode_String(L4_ErrorCode())));

    L4_MsgStore(tag, &msg);
    L4_MsgGetMapItem(&msg, 0, &map);

    if (dbg$virtual_memory == 1)
    {
        if (map.X.snd_fpage.raw == L4_Nilpage.raw)
        {
            notice(MEM_I_REJMAP "rejecting mapping\n");
            notice(MEM_I_REJMAP "virtual  $%016lX - $%016lX\n",
                    L4_Address(virt_page), L4_Address(virt_page)
                    + (L4_Size(virt_page) - 1));
            notice(MEM_I_REJMAP "physical $%016lX - $%016lX\n",
                    L4_Address(phys_page), L4_Address(phys_page)
                    + (L4_Size(phys_page) - 1));
        }
        else
        {
            notice(MEM_I_ACCMAP "accepting mapping\n");
            notice(MEM_I_ACCMAP "virtual  $%016lX - $%016lX\n",
                    L4_Address(virt_page), L4_Address(virt_page)
                    + (L4_Size(virt_page) - 1));
            notice(MEM_I_ACCMAP "physical $%016lX - $%016lX\n",
                    L4_Address(phys_page), L4_Address(phys_page)
                    + (L4_Size(phys_page) - 1));
        }
    }

    return;
}
Example #5
void
l4_map_page(mm_context_t *context, L4_Fpage_t fpage, unsigned long address,
		unsigned long attrib)
{
	unsigned long dest_addr, src_addr;
	L4_Fpage_t vpage;
	L4_PhysDesc_t pdesc;
	int rwx;

	src_addr = L4_Address(fpage);
#if 0
	if (src_addr >= vmalloc_ms_base && src_addr <= vmalloc_ms_base + vmalloc_ms_size) {
		// Within the vmalloc bounds so use the new depriv mapping
		dest_addr = address & 0xfffff000;
		vpage = L4_Fpage(dest_addr, L4_Size(fpage));
		pdesc = L4_PhysDesc(src_addr + vmalloc_ms_virt_to_phys_offset, attrib);
		rwx = L4_Rights(fpage);

		L4_FpageAddRightsTo(&vpage, rwx);
		L4_MapFpage(context->space_id, vpage, pdesc);
	} else {
#endif

		eas_map(context->eas, fpage, address, attrib);
//	}
}

#if defined(CONFIG_CELL)
unsigned long last_vstart = -1UL;
unsigned long last_vend, last_seg;

int
okl4_find_segment(unsigned long vaddr, unsigned long *offset, unsigned long *seg)
{
	okl4_env_segments_t *segments = OKL4_ENV_GET_SEGMENTS("SEGMENTS");
	unsigned long i;

	assert(segments);
	for (i = 0; i < segments->num_segments; i++) {
		if (vaddr >= segments->segments[i].virt_addr &&
		    vaddr <= (segments->segments[i].virt_addr +
			segments->segments[i].size - 1)) {
			*offset = vaddr - segments->segments[i].virt_addr;
			*seg = segments->segments[i].segment;

			/* cache the hit for repeat lookups */
			last_vstart = segments->segments[i].virt_addr;
			last_vend = last_vstart + segments->segments[i].size - 1;
			last_seg = segments->segments[i].segment;
			return 1;
		}
	}

	return 0;
}
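The last_* globals cache the most recent hit but are never read in this
function; a fast-path wrapper that consults them first might look like this
(a sketch: the wrapper name is invented, only the cache variables come from
the code above):

static inline int
okl4_find_segment_cached(unsigned long vaddr, unsigned long *offset,
		unsigned long *seg)
{
	/* serve repeat lookups from the cached [last_vstart, last_vend] range */
	if (vaddr >= last_vstart && vaddr <= last_vend) {
		*offset = vaddr - last_vstart;
		*seg = last_seg;
		return 1;
	}
	return okl4_find_segment(vaddr, offset, seg);
}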
Example #6
// FIXME - i386 specific (EM_386)
L4_Word_t elf_load32(AddrSpace_t *space, L4_Word_t image, L4_Word_t size)
{
    Elf32_Ehdr *eh = (Elf32_Ehdr *) image;
    Elf32_Phdr *ph;
    int i;

    assert(space != NULL);

    debug("elf_load32: loading image at %lx (size %lx) for as: %p\n", image, size, space);

    if ((eh->e_ident[EI_MAG0] != ELFMAG0) ||
        (eh->e_ident[EI_MAG1] != ELFMAG1) ||
        (eh->e_ident[EI_MAG2] != ELFMAG2) ||
        (eh->e_ident[EI_MAG3] != ELFMAG3) ||
        (eh->e_type != ET_EXEC) ||
        (eh->e_machine != EM_386) ||
        (eh->e_phoff == 0))
    {
        debug("elf_load32: illegal ELF image at %lx\n", (L4_Word_t) image);
        return 0;
    }

    for (i = 0; i < eh->e_phnum; i++) {
        L4_Fpage_t vp, fp;
        L4_Word_t map_size;     /* kept distinct from the image-size parameter */
        uint8_t *src, *dest;

        ph = (Elf32_Phdr *) (image + eh->e_phoff + i * eh->e_phentsize); 
        if (ph->p_type != PT_LOAD) continue;

        /* the segment's file data must lie within the image */
        assert((ph->p_offset + ph->p_filesz) <= size);
        assert(ph->p_filesz <= ph->p_memsz);

        map_size = intlog2(ph->p_memsz);
        vp = L4_Fpage(ph->p_vaddr, map_size);
        fp = address_space_request_page(space, vp);
        if (L4_IsNilFpage(fp)) {
            debug("elf_load32: can't allocate memory\n");
            return 0;
        }
        dest = (uint8_t *) L4_Address(fp);
        src = (uint8_t *) (image + ph->p_offset);

        memcpy(dest, src, ph->p_filesz);
        memset(dest + ph->p_filesz, 0, map_size - ph->p_filesz);
    }
    
    return eh->e_entry;
}
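A loader would start the task's first thread at the returned entry point; a
usage sketch, where image_base, image_size, and thread_start() are invented
stand-ins:

L4_Word_t entry = elf_load32(as, (L4_Word_t) image_base, image_size);
if (entry == 0) {
    debug("boot: ELF image rejected\n");
} else {
    thread_start(as, entry);    /* hypothetical thread-start helper */
}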
Example #7
void
thread_delete(struct thread *thread)
{
    struct pd *pd;
#if defined(CONFIG_SESSION)
    struct session_p_node *sd;
#endif
    pd = thread->owner;
    (void)L4_ThreadControl(thread->id,
                           L4_nilspace,
                           L4_nilthread, L4_nilthread, L4_nilthread, 0, NULL);
    thread_free(thread->id);

    if (
#if defined(CONFIG_EAS)
        thread->eas == NULL &&
#endif
        thread->utcb != (void *)-1UL) {
        /* free local thread no. */
        bfl_free(pd->local_threadno,
                 ((uintptr_t)thread->utcb -
                  L4_Address(pd->space.utcb_area)) >> L4_GetUtcbBits());
    }

#if defined(CONFIG_SESSION)
    /* assumed continuation: session teardown walks the pd's session list
     * via sd in the full source; it is omitted from this excerpt. */
    (void)sd;
#endif
}
Example #8
int
l4e_sigma0_map_fpage(L4_Fpage_t virt_page, L4_Fpage_t phys_page)
{
/* 
 * XXX: These two special cases are workarounds for broken superpage
 * support in pistachio.  On ARM, 1M superpages are disabled by
 * pistachio to reduce the size of the mapping database, however due to
 * bugs in the mapping code, any mappings >= 1M get converted into 4K
 * mappings (rather than 64K).  For MIPS, the tlb refill code assumes
 * only 4K mappings are used, even though the pagetable building code will
 * use superpages where possible.  -- alexw
 */
#if defined(ARCH_ARM)
	uintptr_t virt_base = L4_Address(virt_page);
	uintptr_t phys_base = L4_Address(phys_page);
	uintptr_t offset = 0;
	uintptr_t step = L4_Size(virt_page) > 0x10000 ? 0x10000 : L4_Size(virt_page);
	uintptr_t limit = L4_Size(virt_page) - 1;

	for (virt_page = L4_Fpage(virt_base + offset, step),
	     phys_page = L4_Fpage(phys_base + offset, step);
	     offset < limit;
	     offset += step,
	     virt_page = L4_Fpage(virt_base + offset, step),
	     phys_page = L4_Fpage(phys_base + offset, step))
#elif defined(ARCH_MIPS64)
	uintptr_t virt_base = L4_Address(virt_page);
	uintptr_t phys_base = L4_Address(phys_page);
	uintptr_t offset = 0;
	uintptr_t step = 0x1000;
	uintptr_t limit = L4_Size(virt_page) - 1;

	for (virt_page = L4_Fpage(virt_base + offset, step),
	     phys_page = L4_Fpage(phys_base + offset, step);
	     offset < limit;
	     offset += step,
	     virt_page = L4_Fpage(virt_base + offset, step),
	     phys_page = L4_Fpage(phys_base + offset, step))
#endif
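	/* when neither architecture is selected, no for-statement is emitted
	 * and the block below runs exactly once with the caller's fpages */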
	{
		L4_ThreadId_t   tid;
		L4_MsgTag_t     tag;
		L4_Msg_t        msg;
		L4_MapItem_t    map;
		/*
		 * find our pager's ID 
		 */
		tid = L4_Pager();

		L4_Set_Rights(&phys_page, L4_FullyAccessible);
		/* accept fpages */
		L4_Accept(L4_MapGrantItems(virt_page));

		/* send it to our pager */
		L4_MsgClear(&msg);
		L4_MsgAppendWord(&msg, (L4_Word_t) phys_page.raw);
		L4_MsgAppendWord(&msg, (L4_Word_t) 0);
		L4_Set_Label(&msg.tag, SIGMA0_REQUEST_LABEL);

		L4_MsgLoad(&msg);

		/* make the call */
		tag = L4_Call(tid);

		/* check for an error */
		if (L4_IpcFailed(tag)) {
			return 2;
		}

		L4_MsgStore(tag, &msg);
		L4_MsgGetMapItem(&msg, 0, &map);

		/*
		 * rejected mapping? 
		 */
		if (map.X.snd_fpage.raw == L4_Nilpage.raw) {
			return 1;
		}

	}
	return 0;
}
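The return value is a tri-state: 0 on success, 1 if sigma0 rejected the
mapping, 2 on IPC failure. A usage sketch with invented addresses:

/* identity-map a 64K region (example addresses only) */
int rc = l4e_sigma0_map_fpage(L4_Fpage(0x80100000, 0x10000),
                              L4_Fpage(0x80100000, 0x10000));
if (rc == 2) {
	/* the IPC itself failed */
} else if (rc == 1) {
	/* sigma0 replied with the nil fpage: mapping rejected */
}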