Example #1
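/*
 * Ask the pager (sigma0) to map phys_page onto virt_page with the given
 * access rights; when dbg$virtual_memory is set, log whether the mapping
 * was accepted or rejected.
 */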
void
sys$sigma0_map_fpage(L4_Fpage_t virt_page, L4_Fpage_t phys_page,
        unsigned int priv)
{
    L4_ThreadId_t           tid;
    L4_MsgTag_t             tag;
    L4_Msg_t                msg;
    L4_MapItem_t            map;
    // Find Pager's ID
    tid = L4_Pager();
    L4_Set_Rights(&phys_page, priv);
    L4_Accept(L4_MapGrantItems(virt_page));
    L4_MsgClear(&msg);
    L4_MsgAppendWord(&msg, (L4_Word_t) phys_page.raw);
    L4_MsgAppendWord(&msg, (L4_Word_t) 0);
    L4_Set_Label(&msg.tag, SIGMA0_REQUEST_LABEL);
    L4_MsgLoad(&msg);

    tag = L4_Call(tid);

    PANIC(L4_IpcFailed(tag), notice(IPC_F_FAILED "IPC failed (error %ld: %s)\n",
            L4_ErrorCode(), L4_ErrorCode_String(L4_ErrorCode())));

    L4_MsgStore(tag, &msg);
    L4_MsgGetMapItem(&msg, 0, &map);

    if (dbg$virtual_memory == 1)
    {
        if (map.X.snd_fpage.raw == L4_Nilpage.raw)
        {
            notice(MEM_I_REJMAP "rejecting mapping\n");
            notice(MEM_I_REJMAP "virtual  $%016lX - $%016lX\n",
                    L4_Address(virt_page), L4_Address(virt_page)
                    + (L4_Size(virt_page) - 1));
            notice(MEM_I_REJMAP "physical $%016lX - $%016lX\n",
                    L4_Address(phys_page), L4_Address(phys_page)
                    + (L4_Size(phys_page) - 1));
        }
        else
        {
            notice(MEM_I_ACCMAP "accepting mapping\n");
            notice(MEM_I_ACCMAP "virtual  $%016lX - $%016lX\n",
                    L4_Address(virt_page), L4_Address(virt_page)
                    + (L4_Size(virt_page) - 1));
            notice(MEM_I_ACCMAP "physical $%016lX - $%016lX\n",
                    L4_Address(phys_page), L4_Address(phys_page)
                    + (L4_Size(phys_page) - 1));
        }
    }

    return;
}
Example #2
static void handle_fault(L4_Word_t faddr, L4_Word_t fip, L4_MapItem_t *map)
{
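	/*
	 * Fault handler for the "drop" pager: record the faulting page in a
	 * small ring buffer, flush whichever page falls param->keep entries
	 * behind the newest one, then forward the fault to the next pager.
	 */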
	struct drop_param *param = get_ctx();
	L4_MsgTag_t tag = muidl_get_tag();
	int rwx = tag.X.label & 0x000f;
#if 0
	L4_ThreadId_t from = muidl_get_sender();
	diag("drop_pager: pf in %lu:%lu at %#lx, ip %#lx",
		L4_ThreadNo(from), L4_Version(from), faddr, fip);
#endif
	param->log_top = (param->log_top + 1) % LOG_SIZE;
	param->log[param->log_top] = L4_FpageLog2(faddr, 12);
	L4_Set_Rights(&param->log[param->log_top], rwx);

	int dpos = param->log_top - param->keep;
	if(dpos < 0) dpos += LOG_SIZE;
	assert(dpos >= 0 && dpos < LOG_SIZE);
	L4_Fpage_t drop = param->log[dpos];
	if(!L4_IsNilFpage(drop)
		&& L4_Address(drop) != (faddr & ~PAGE_MASK))
	{
#if 0
		diag("flushing %#lx:%#lx (dpos %d)",
			L4_Address(drop), L4_Size(drop), dpos);
#endif
		L4_Set_Rights(&drop, L4_FullyAccessible);
		L4_FlushFpage(drop);
	}

	/* pass it on. */
	L4_LoadBR(0, L4_CompleteAddressSpace.raw);
	L4_LoadMR(0, (L4_MsgTag_t){ .X.label = 0xffe0 | rwx,
		.X.u = 2 }.raw);
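	/*
	 * The example is truncated here.  An assumed completion, following
	 * the usual L4 pagefault-forwarding pattern (not taken from the
	 * original source): pass the fault address and IP, call the pager,
	 * and hand its map item back through *map.
	 */
	L4_LoadMR(1, faddr);
	L4_LoadMR(2, fip);
	L4_MsgTag_t out = L4_Call(L4_Pager());
	if(L4_IpcFailed(out)) return;	/* error handling omitted */
	L4_StoreMRs(1, 2, map->raw);
}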
Example #3
void
l4_map_page(mm_context_t *context, L4_Fpage_t fpage, unsigned long address,
		unsigned long attrib)
{
	unsigned long dest_addr, src_addr;
	L4_Fpage_t vpage;
	L4_PhysDesc_t pdesc;
	int rwx;

	src_addr = L4_Address(fpage);
#if 0
	if (src_addr >= vmalloc_ms_base && src_addr <= vmalloc_ms_base + vmalloc_ms_size) {
		// Within the vmalloc bounds so use the new depriv mapping
		dest_addr = address & 0xfffff000;
		vpage = L4_Fpage(dest_addr, L4_Size(fpage));
		pdesc = L4_PhysDesc(src_addr + vmalloc_ms_virt_to_phys_offset, attrib);
		rwx = L4_Rights(fpage);

		L4_FpageAddRightsTo(&vpage, rwx);
		L4_MapFpage(context->space_id, vpage, pdesc);
	} else {
#endif

		eas_map(context->eas, fpage, address, attrib);
//	}
}
#endif

#if defined(CONFIG_CELL)
unsigned long last_vstart = -1UL;
unsigned long last_vend, last_seg;

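/*
 * Look up the OKL4 environment segment that contains vaddr: on a hit,
 * return 1 with the offset into the segment and the segment number, and
 * remember the range in last_vstart/last_vend/last_seg for later lookups.
 */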
int
okl4_find_segment(unsigned long vaddr, unsigned long *offset, unsigned long *seg)
{
	okl4_env_segments_t *segments = OKL4_ENV_GET_SEGMENTS("SEGMENTS");
	unsigned long i;

	assert(segments);
	for (i = 0; i < segments->num_segments; i++) {
		if (vaddr >= segments->segments[i].virt_addr &&
		    vaddr <= (segments->segments[i].virt_addr +
			      segments->segments[i].size - 1)) {
			*offset = vaddr - segments->segments[i].virt_addr;
			*seg = segments->segments[i].segment;

			/* Cache lookup */
			last_vstart = segments->segments[i].virt_addr;
			last_vend = last_vstart + segments->segments[i].size - 1;
			last_seg = segments->segments[i].segment;
			return 1;
		}
	}

	return 0;
}
Example #4
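/*
 * Map size bytes of physical memory at phys_addr onto virt_addr via
 * sigma0; the virtual and physical fpages are trimmed to the smaller of
 * the two sizes before the request is made.
 */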
void
sys$sigma0_map(vms$pointer virt_addr, vms$pointer phys_addr, vms$pointer size,
        unsigned int priv)
{
    L4_Fpage_t              ppage;
    L4_Fpage_t              vpage;

    vms$pointer             pbase;
    vms$pointer             pend;
    vms$pointer             vbase;
    vms$pointer             vend;

    vbase = virt_addr;
    vend = vbase + (size - 1);

    pbase = phys_addr;
    pend = pbase + (size - 1);

    if (vbase < vend)
    {
        /* ???? We should map an fpage larger than size!
        vpage = vms$biggest_fpage(vbase, vbase, vend);
        ppage = vms$biggest_fpage(pbase, pbase, pend);
        */

        vpage = L4_Fpage(vbase, (vend - vbase) + 1);
        ppage = L4_Fpage(pbase, (pend - pbase) + 1);

        PANIC(L4_IsNilFpage(vpage) || L4_IsNilFpage(ppage));

        if (L4_Size(vpage) > L4_Size(ppage))
        {
            vpage = L4_Fpage(vbase, L4_Size(ppage));
        }
        else if (L4_Size(ppage) > L4_Size(vpage))
        {
            ppage = L4_Fpage(pbase, L4_Size(vpage));
        }

        sys$sigma0_map_fpage(vpage, ppage, L4_FullyAccessible);

        vbase += L4_Size(vpage);
        pbase += L4_Size(ppage);
    }

    return;
}
Example #5
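/*
 * Map size bytes from phys_addr to virt_addr, requesting the largest
 * fpage that fits on each iteration.
 */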
int
l4e_sigma0_map(uintptr_t virt_addr, uintptr_t phys_addr, uintptr_t size)
{
	uintptr_t vbase = virt_addr, vend = vbase + size - 1;
	uintptr_t pbase = phys_addr, pend = pbase + size - 1;
	L4_Fpage_t vpage, ppage;

	while (vbase < vend) {
		vpage = l4e_biggest_fpage(vbase, vbase, vend);
		ppage = l4e_biggest_fpage(pbase, pbase, pend);
		if (L4_Size(vpage) > L4_Size(ppage))
			vpage = L4_Fpage(vbase, L4_Size(ppage));
		else if (L4_Size(ppage) > L4_Size(vpage))
			ppage = L4_Fpage(pbase, L4_Size(vpage));
		l4e_sigma0_map_fpage(vpage, ppage);
		vbase += L4_Size(vpage);
		pbase += L4_Size(ppage);
	}
	return 0;
}
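For context, a minimal caller might look like the sketch below. The device address, the map_device_window name, and the extern prototype placement are illustrative assumptions, not taken from the examples above.

#include <stdint.h>

/* prototype as defined in Example #5 */
int l4e_sigma0_map(uintptr_t virt_addr, uintptr_t phys_addr, uintptr_t size);

/* Identity-map a hypothetical 1 MiB device window before first use. */
static void map_device_window(void)
{
	const uintptr_t dev_base = 0x10000000UL;	/* illustrative address */
	(void) l4e_sigma0_map(dev_base, dev_base, 0x100000UL);
}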
Example #6
int
l4e_sigma0_map_fpage(L4_Fpage_t virt_page, L4_Fpage_t phys_page)
{
/* 
 * XXX: These two special cases are workarounds for broken superpage
 * support in pistachio.  On ARM, 1M superpages are disabled by
 * pistachio to reduce the size of the mapping database; however, due to
 * bugs in the mapping code, any mappings >= 1M get converted into 4K
 * mappings (rather than 64K).  For MIPS, the tlb refill code assumes
 * only 4K mappings are used, even though the pagetable building code will
 * use superpages where possible.  -- alexw
 */
#if defined(ARCH_ARM)
	uintptr_t virt_base = L4_Address(virt_page);
	uintptr_t phys_base = L4_Address(phys_page);
	uintptr_t offset = 0;
	uintptr_t step = L4_Size(virt_page) > 0x10000 ? 0x10000 : L4_Size(virt_page);
	uintptr_t limit = L4_Size(virt_page) - 1;

	for (virt_page = L4_Fpage(virt_base + offset, step),
	     phys_page = L4_Fpage(phys_base + offset, step);
	     offset < limit;
	     offset += step,
	     virt_page = L4_Fpage(virt_base + offset, step),
	     phys_page = L4_Fpage(phys_base + offset, step))
#elif defined(ARCH_MIPS64)
	uintptr_t virt_base = L4_Address(virt_page);
	uintptr_t phys_base = L4_Address(phys_page);
	uintptr_t offset = 0;
	uintptr_t step = 0x1000;
	uintptr_t limit = L4_Size(virt_page) - 1;

	for (virt_page = L4_Fpage(virt_base + offset, step),
	     phys_page = L4_Fpage(phys_base + offset, step);
	     offset < limit;
	     offset += step,
	     virt_page = L4_Fpage(virt_base + offset, step),
	     phys_page = L4_Fpage(phys_base + offset, step))
#endif
	{
		L4_ThreadId_t   tid;
		L4_MsgTag_t     tag;
		L4_Msg_t        msg;
		L4_MapItem_t    map;
		/*
		 * find our pager's ID 
		 */
		tid = L4_Pager();

		L4_Set_Rights(&phys_page, L4_FullyAccessible);
		/* accept fpages */
		L4_Accept(L4_MapGrantItems(virt_page));

		/* send it to our pager */
		L4_MsgClear(&msg);
		L4_MsgAppendWord(&msg, (L4_Word_t) phys_page.raw);
		L4_MsgAppendWord(&msg, (L4_Word_t) 0);
		L4_Set_Label(&msg.tag, SIGMA0_REQUEST_LABEL);

		L4_MsgLoad(&msg);

		/* make the call */
		tag = L4_Call(tid);

		/* check for an error */
		if (L4_IpcFailed(tag)) {
			return 2;
		}

		L4_MsgStore(tag, &msg);
		L4_MsgGetMapItem(&msg, 0, &map);

		/*
		 * rejected mapping? 
		 */
		if (map.X.snd_fpage.raw == L4_Nilpage.raw) {
			return 1;
		}

	}
	return 0;
}