Example #1
/* post processing (e.g., flushing page-table entries) */
void init_arch_3()
{

#if defined(CONFIG_DEBUG_TRACE_INIT)
    printf("CPU features: %x\n", get_cpu_features());
#endif

#if defined(CONFIG_SMP)
    /* init_arch_3 is only executed by the boot cpu */
    boot_cpu = get_apic_cpu_id();

    init_cpu_local_data();
    
    smp_startup_processors();
#endif

    /* Kmem is initialized now. */
    __zero_page = kmem_alloc(PAGE_SIZE);
    zero_memory(__zero_page, PAGE_SIZE);

    /* Flush init section. */
    //kernel_ptdir[0] = 0;
    
    flush_tlb();

#if !defined(CONFIG_X86_APIC)
    /* Do not give out the timer interrupt. */
    interrupt_owner[0] = get_idle_tcb();
    interrupt_owner[2] = get_idle_tcb();
    interrupt_owner[8] = get_idle_tcb();
#endif
}
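Every example in this collection ends with a call to flush_tlb(). For reference, the following is a minimal sketch of what such a routine commonly looks like on 32-bit x86, where writing CR3 back to itself discards all non-global TLB entries; it is an illustration only, not the implementation used by the kernel in Example #1.

/* Minimal sketch (illustration only): on x86, reloading CR3
 * invalidates all non-global TLB entries. */
static inline void flush_tlb(void)
{
    unsigned long cr3;

    asm volatile("mov %%cr3, %0" : "=r"(cr3));              /* read the page-directory base */
    asm volatile("mov %0, %%cr3" : : "r"(cr3) : "memory");  /* write it back -> full flush */
}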
Example #2
void init_video(){
	if (video.mode == TEXT_MODE)
		return;
	ioremap((unsigned long)video.fb,
		(unsigned long)(video.width * video.height * (video.color / 8)),
		PAGE_RW);
	flush_tlb();
}
Example #3
void start_kernel(){
	setup_memory();
	init_video();
	flush_tlb();
	int *s = (int *)video.fb;
	int i;
	for(i = 0; i < video.width * video.height; ++i){
		*(s + i) = 0x3242;
	}
	while(1){}

}
Example #4
void
switchkvm_new(void)
{
  dsb_barrier();
  flush_idcache();
  //cprintf("The phy pgtbase address is %x\n", (uint)v2p(kpgdir));
  set_pgtbase((uint)v2p(kpgdir));   // switch to the kernel page table
  //cprintf("after set_pgtbase\n");
  dsb_barrier();
  flush_tlb();
  //cprintf("after flush_tlb\n");
}
Example #5
// Switch TSS and h/w page table to correspond to process p.
void
switchuvm(struct proc *p)
{
  pushcli();
  //cpu->ts.esp0 = (uint)proc->kstack + KSTACKSIZE;
  if(p->pgdir == 0)
    panic("switchuvm: no pgdir");
//cprintf("before copying uvm to kvm kpgdir=%x the first entry: %x\n", kpgdir, kpgdir[0]);
  memmove((void *)kpgdir, (void *)p->pgdir, PGSIZE);  // switch to new user address space
  flush_idcache();
  flush_tlb();
  popcli();
}
Example #6
void unprotect_dos_mem( void )
{
	int i;

	for ( i = 0; i < 256; ++i )
	{
		pgd_t *pPgd = pgd_offset( g_psKernelSeg, i * PAGE_SIZE );
		pte_t *pPte = pte_offset( pPgd, i * PAGE_SIZE );

		PTE_VALUE( *pPte ) = ( i * PAGE_SIZE ) | PTE_PRESENT | PTE_WRITE | PTE_USER;
	}
	flush_tlb();
}
Example #7
int unmap_page_range(unsigned long addr, unsigned long len)
{
	pde_t *pd;
	unsigned long end = addr + len;

	pd = pde_offset(current->mm->pd, addr);
	while (addr < end) {
		unmap_pte_range(pd, addr, end - addr);
		addr = (addr + PGDIR_SIZE) & PGDIR_MASK;
		pd++;
	}
	flush_tlb();
	return 0;
}
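unmap_page_range() delegates the per-entry work to unmap_pte_range(), which is not shown here. The following is a hypothetical sketch of what it might look like for a classic two-level layout; the helpers pde_none(), pte_offset() and pte_clear() as well as PAGE_SIZE are assumed names for illustration, not taken from the project above.

/* Hypothetical sketch: clear the PTEs under one page directory entry,
 * covering [addr, addr + len) but never crossing into the next entry. */
static void unmap_pte_range(pde_t *pd, unsigned long addr, unsigned long len)
{
	pte_t *pte;
	unsigned long end = addr + len;
	unsigned long pde_end = (addr + PGDIR_SIZE) & PGDIR_MASK;

	if (pde_none(*pd))
		return;
	if (end > pde_end)
		end = pde_end;		/* stay within this page table */
	pte = pte_offset(pd, addr);
	while (addr < end) {
		pte_clear(pte);
		pte++;
		addr += PAGE_SIZE;
	}
}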
Example #8
void flush_tlb_domain(struct domain *d)
{
    /* Update the VTTBR if necessary with the domain d. In this case,
     * it is only necessary to flush the TLBs on all CPUs with the
     * current VMID (our domain).
     */
    if ( d != current->domain )
        p2m_load_VTTBR(d);

    flush_tlb();

    if ( d != current->domain )
        p2m_load_VTTBR(current->domain);
}
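The comment above relies on the flush being scoped to the VMID currently programmed into the VTTBR. As a hedged sketch (an assumption for illustration, not the actual Xen implementation), such a flush is typically issued on ARMv8 like this:

/* Sketch only: invalidate all stage-1/stage-2 TLB entries for the
 * current VMID across the inner-shareable domain (ARMv8). */
static inline void flush_tlb(void)
{
    asm volatile("dsb ish\n\t"
                 "tlbi vmalls12e1is\n\t"   /* all S1+S2 entries, current VMID */
                 "dsb ish\n\t"
                 "isb"
                 : : : "memory");
}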
Example #9
int elf_load_segment(char *image, elf32_phdr* seg)
{
	/* FIXME: For now I don't know how to cope with p_vaddr AND p_align,
	   so I ignore the latter, and also p_flags, which apply to ONE FRAME only */
	UINT i,size=seg->p_filesz;
	for (i=ALIGN_DOWN(seg->p_vaddr);i<seg->p_vaddr+seg->p_memsz;i+=FRAME_SIZE)
		make_page(i,PAGE_FLAG_PRESENT | PAGE_FLAG_WRITE | PAGE_FLAG_USERMODE,current_task->directory,1);
	flush_tlb(); //TODO: If make_page() fails we need a "not enough core"
	if (size>seg->p_memsz) size=seg->p_memsz;
	memcpy((void *)seg->p_vaddr,(void *)((UINT)image+seg->p_offset),size);
	if (seg->p_filesz < seg->p_memsz)	/* zero the BSS part of the segment */
		memset((void *)(seg->p_vaddr + seg->p_filesz), 0, seg->p_memsz - seg->p_filesz);
	return 0;
}
Example #10
/*
 * This routine puts a long into any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 * Now keeps R/W state of page so that a text page stays readonly
 * even if a debugger scribbles breakpoints into it.  -M.U-
 */
static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
	unsigned long data)
{
	pgd_t *pgdir;
	pmd_t *pgmiddle;
	pte_t *pgtable;
	unsigned long page;

repeat:
	pgdir = pgd_offset(vma->vm_mm, addr);
	if (!pgd_present(*pgdir)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return;
	}
	pgmiddle = pmd_offset(pgdir, addr);
	if (pmd_none(*pgmiddle)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	if (pmd_bad(*pgmiddle)) {
		printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
		pmd_clear(pgmiddle);
		return;
	}
	pgtable = pte_offset(pgmiddle, addr);
	if (!pte_present(*pgtable)) {
		do_no_page(tsk, vma, addr, 1);
		goto repeat;
	}
	page = pte_page(*pgtable);
	if (!pte_write(*pgtable)) {
		do_wp_page(tsk, vma, addr, 1);
		goto repeat;
	}
/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page < high_memory)
		*(unsigned long *) (page + (addr & ~PAGE_MASK)) = data;
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
/* this should also re-instate whatever read-only mode there was before */
	set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	flush_tlb();
}
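The NOTE at the top of put_long() places the page-boundary and task-area checks on the caller. A hypothetical wrapper sketch showing those two checks (put_long_checked() is an invented name, not part of the original ptrace code):

/* Hypothetical wrapper: verify the long does not straddle a page
 * boundary and lies inside the task area before calling put_long(). */
static int put_long_checked(struct task_struct *tsk, struct vm_area_struct *vma,
			    unsigned long addr, unsigned long data)
{
	if ((addr & ~PAGE_MASK) > PAGE_SIZE - sizeof(long))
		return -EIO;		/* would cross a page boundary */
	if (addr + sizeof(long) > TASK_SIZE)
		return -EIO;		/* not in the task area */
	put_long(tsk, vma, addr, data);
	return 0;
}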
Example #11
void save_resources (tcb_t *current, tcb_t *dest)
{
    TRACEPOINT	(SAVE_RESOURCES, printf ("save_resources: tcb=%p  rc=%p\n",
					 current, current->resources));

    if (current->resources & TR_IPC_MEM)
    {
	ptr_t pdir = current->space->pagedir ();
	pdir[MEM_COPYAREA1 >> PAGEDIR_BITS] = 0;
	pdir[(MEM_COPYAREA1 >> PAGEDIR_BITS) + 1] = 0;

	pdir[MEM_COPYAREA2 >> PAGEDIR_BITS] = 0;
	pdir[(MEM_COPYAREA2 >> PAGEDIR_BITS) + 1] = 0;

	if ((dest == get_idle_tcb ()) || same_address_space (current, dest))
	    flush_tlb ();
    }
}
Example #12
// Switch to the user page table (TTBR0)
void switchuvm (struct proc *p)
{
    uint64 val64;

    pushcli();

    if (p->pgdir == 0) {
        panic("switchuvm: no pgdir");
    }

    val64 = (uint64) V2P(p->pgdir) | 0x00;

    asm("MSR TTBR0_EL1, %[v]": :[v]"r" (val64):);
    flush_tlb();

    popcli();
}
Example #13
int p2m_alloc_table(struct domain *d)
{
    struct p2m_domain *p2m = &d->arch.p2m;
    struct page_info *page;
    void *p;

    page = alloc_domheap_pages(NULL, P2M_FIRST_ORDER, 0);
    if ( page == NULL )
        return -ENOMEM;

    spin_lock(&p2m->lock);

    /* Clear both first level pages */
    p = __map_domain_page(page);
    clear_page(p);
    unmap_domain_page(p);

    p = __map_domain_page(page + 1);
    clear_page(p);
    unmap_domain_page(p);

    p2m->first_level = page;

    d->arch.vttbr = page_to_maddr(p2m->first_level)
        | ((uint64_t)p2m->vmid&0xff)<<48;

    p2m_load_VTTBR(d);

    /* Make sure that all TLBs corresponding to the new VMID are flushed
     * before using it
     */
    flush_tlb();

    p2m_load_VTTBR(current->domain);

    spin_unlock(&p2m->lock);

    return 0;
}
Example #14
File: p2m.c Project: Fantu/Xen
void flush_tlb_domain(struct domain *d)
{
    unsigned long flags = 0;

    /* Update the VTTBR if necessary with the domain d. In this case,
     * it is only necessary to flush the TLBs on all CPUs with the
     * current VMID (our domain).
     */
    if ( d != current->domain )
    {
        local_irq_save(flags);
        p2m_load_VTTBR(d);
    }

    flush_tlb();

    if ( d != current->domain )
    {
        p2m_load_VTTBR(current->domain);
        local_irq_restore(flags);
    }
}
Example #15
/*
 * Write bytes to kernel address space for debugger.
 */
void
db_write_bytes(
	vm_offset_t	addr,
	int		size,
	char		*data,
	task_t		task)
{
	char		*dst;

	pt_entry_t *ptep0 = 0;
	pt_entry_t	oldmap0 = 0;
	vm_offset_t	addr1;
	pt_entry_t *ptep1 = 0;
	pt_entry_t	oldmap1 = 0;
	extern char	etext;

	if ((addr < VM_MIN_KERNEL_ADDRESS) ^
	    ((addr + size) <= VM_MIN_KERNEL_ADDRESS)) {
	    db_error("\ncannot write data into mixed space\n");
	    /* NOTREACHED */
	}
	if (addr < VM_MIN_KERNEL_ADDRESS) {
	    if (task) {
		db_write_bytes_user_space(addr, size, data, task);
		return;
	    } else if (db_current_task() == TASK_NULL) {
		db_printf("\nbad address %x\n", addr);
		db_error(0);
		/* NOTREACHED */
	    }
	}

	if (addr >= VM_MIN_KERNEL_ADDRESS &&
	    addr <= (vm_offset_t)&etext)
	{
	    ptep0 = pmap_pte(kernel_pmap, addr);
	    oldmap0 = *ptep0;
	    *ptep0 |= INTEL_PTE_WRITE;

	    addr1 = i386_trunc_page(addr + size - 1);
	    if (i386_trunc_page(addr) != addr1) {
		/* data crosses a page boundary */

		ptep1 = pmap_pte(kernel_pmap, addr1);
		oldmap1 = *ptep1;
		*ptep1 |= INTEL_PTE_WRITE;
	    }
	    if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
		set_cr4(get_cr4() & ~CR4_PGE);
	    flush_tlb();
	}

	dst = (char *)addr;

	while (--size >= 0)
	    *dst++ = *data++;

	if (ptep0) {
	    *ptep0 = oldmap0;
	    if (ptep1) {
		*ptep1 = oldmap1;
	    }
	    flush_tlb();
	    if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
		set_cr4(get_cr4() | CR4_PGE);
	}
}
Example #16
static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
                               p2m_type_t t)
{
    paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
    /* xn and write bit will be defined in the switch */
    lpae_t e = (lpae_t) {
        .p2m.af = 1,
        .p2m.sh = LPAE_SH_OUTER,
        .p2m.read = 1,
        .p2m.mattr = mattr,
        .p2m.table = 1,
        .p2m.valid = 1,
        .p2m.type = t,
    };

    BUILD_BUG_ON(p2m_max_real_type > (1 << 4));

    switch (t)
    {
    case p2m_ram_rw:
        e.p2m.xn = 0;
        e.p2m.write = 1;
        break;

    case p2m_ram_ro:
        e.p2m.xn = 0;
        e.p2m.write = 0;
        break;

    case p2m_map_foreign:
    case p2m_grant_map_rw:
    case p2m_mmio_direct:
        e.p2m.xn = 1;
        e.p2m.write = 1;
        break;

    case p2m_grant_map_ro:
    case p2m_invalid:
        e.p2m.xn = 1;
        e.p2m.write = 0;
        break;

    case p2m_max_real_type:
        BUG();
        break;
    }

    ASSERT(!(pa & ~PAGE_MASK));
    ASSERT(!(pa & ~PADDR_MASK));

    e.bits |= pa;

    return e;
}

/* Allocate a new page table page and hook it in via the given entry */
static int p2m_create_table(struct domain *d,
                            lpae_t *entry)
{
    struct p2m_domain *p2m = &d->arch.p2m;
    struct page_info *page;
    void *p;
    lpae_t pte;

    BUG_ON(entry->p2m.valid);

    page = alloc_domheap_page(NULL, 0);
    if ( page == NULL )
        return -ENOMEM;

    page_list_add(page, &p2m->pages);

    p = __map_domain_page(page);
    clear_page(p);
    unmap_domain_page(p);

    pte = mfn_to_p2m_entry(page_to_mfn(page), MATTR_MEM, p2m_invalid);

    write_pte(entry, pte);

    return 0;
}

enum p2m_operation {
    INSERT,
    ALLOCATE,
    REMOVE,
    RELINQUISH,
    CACHEFLUSH,
};

static int apply_p2m_changes(struct domain *d,
                     enum p2m_operation op,
                     paddr_t start_gpaddr,
                     paddr_t end_gpaddr,
                     paddr_t maddr,
                     int mattr,
                     p2m_type_t t)
{
    int rc;
    struct p2m_domain *p2m = &d->arch.p2m;
    lpae_t *first = NULL, *second = NULL, *third = NULL;
    paddr_t addr;
    unsigned long cur_first_page = ~0,
                  cur_first_offset = ~0,
                  cur_second_offset = ~0;
    unsigned long count = 0;
    unsigned int flush = 0;
    bool_t populate = (op == INSERT || op == ALLOCATE);
    lpae_t pte;

    spin_lock(&p2m->lock);

    if ( d != current->domain )
        p2m_load_VTTBR(d);

    addr = start_gpaddr;
    while ( addr < end_gpaddr )
    {
        if ( cur_first_page != p2m_first_level_index(addr) )
        {
            if ( first ) unmap_domain_page(first);
            first = p2m_map_first(p2m, addr);
            if ( !first )
            {
                rc = -EINVAL;
                goto out;
            }
            cur_first_page = p2m_first_level_index(addr);
        }

        if ( !first[first_table_offset(addr)].p2m.valid )
        {
            if ( !populate )
            {
                addr = (addr + FIRST_SIZE) & FIRST_MASK;
                continue;
            }

            rc = p2m_create_table(d, &first[first_table_offset(addr)]);
            if ( rc < 0 )
            {
                printk("p2m_populate_ram: L1 failed\n");
                goto out;
            }
        }

        BUG_ON(!first[first_table_offset(addr)].p2m.valid);

        if ( cur_first_offset != first_table_offset(addr) )
        {
            if (second) unmap_domain_page(second);
            second = map_domain_page(first[first_table_offset(addr)].p2m.base);
            cur_first_offset = first_table_offset(addr);
        }
        /* else: second already valid */

        if ( !second[second_table_offset(addr)].p2m.valid )
        {
            if ( !populate )
            {
                addr = (addr + SECOND_SIZE) & SECOND_MASK;
                continue;
            }

            rc = p2m_create_table(d, &second[second_table_offset(addr)]);
            if ( rc < 0 ) {
                printk("p2m_populate_ram: L2 failed\n");
                goto out;
            }
        }

        BUG_ON(!second[second_table_offset(addr)].p2m.valid);

        if ( cur_second_offset != second_table_offset(addr) )
        {
            /* map third level */
            if (third) unmap_domain_page(third);
            third = map_domain_page(second[second_table_offset(addr)].p2m.base);
            cur_second_offset = second_table_offset(addr);
        }

        pte = third[third_table_offset(addr)];

        flush |= pte.p2m.valid;

        /* TODO: Handle other p2m type
         *
         * It's safe to do the put_page here because page_alloc will
         * flush the TLBs if the page is reallocated before the end of
         * this loop.
         */
        if ( pte.p2m.valid && p2m_is_foreign(pte.p2m.type) )
        {
            unsigned long mfn = pte.p2m.base;

            ASSERT(mfn_valid(mfn));
            put_page(mfn_to_page(mfn));
        }

        /* Allocate a new RAM page and attach */
        switch (op) {
            case ALLOCATE:
                {
                    struct page_info *page;

                    ASSERT(!pte.p2m.valid);
                    rc = -ENOMEM;
                    page = alloc_domheap_page(d, 0);
                    if ( page == NULL ) {
                        printk("p2m_populate_ram: failed to allocate page\n");
                        goto out;
                    }

                    pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t);

                    write_pte(&third[third_table_offset(addr)], pte);
                }
                break;
            case INSERT:
                {
                    pte = mfn_to_p2m_entry(maddr >> PAGE_SHIFT, mattr, t);
                    write_pte(&third[third_table_offset(addr)], pte);
                    maddr += PAGE_SIZE;
                }
                break;
            case RELINQUISH:
            case REMOVE:
                {
                    if ( !pte.p2m.valid )
                    {
                        count++;
                        break;
                    }

                    count += 0x10;

                    memset(&pte, 0x00, sizeof(pte));
                    write_pte(&third[third_table_offset(addr)], pte);
                    count++;
                }
                break;

            case CACHEFLUSH:
                {
                    if ( !pte.p2m.valid || !p2m_is_ram(pte.p2m.type) )
                        break;

                    flush_page_to_ram(pte.p2m.base);
                }
                break;
        }

        /* Preempt every 2MiB (mapped) or 32 MiB (unmapped) - arbitrary */
        if ( op == RELINQUISH && count >= 0x2000 )
        {
            if ( hypercall_preempt_check() )
            {
                p2m->lowest_mapped_gfn = addr >> PAGE_SHIFT;
                rc = -EAGAIN;
                goto out;
            }
            count = 0;
        }

        /* Go to the next page */
        addr += PAGE_SIZE;
    }

    if ( flush )
    {
        /* At the beginning of the function, Xen updates the VTTBR
         * with the domain where the mappings are created. In this
         * case it is only necessary to flush the TLBs on all CPUs
         * with the current VMID (our domain).
         */
        flush_tlb();
    }

    if ( op == ALLOCATE || op == INSERT )
    {
        unsigned long sgfn = paddr_to_pfn(start_gpaddr);
        unsigned long egfn = paddr_to_pfn(end_gpaddr);

        p2m->max_mapped_gfn = MAX(p2m->max_mapped_gfn, egfn);
        p2m->lowest_mapped_gfn = MIN(p2m->lowest_mapped_gfn, sgfn);
    }

    rc = 0;

out:
    if (third) unmap_domain_page(third);
    if (second) unmap_domain_page(second);
    if (first) unmap_domain_page(first);

    if ( d != current->domain )
        p2m_load_VTTBR(current->domain);

    spin_unlock(&p2m->lock);

    return rc;
}
Example #17
// 1:1 map the memory [phy_low, phy_hi] in the kernel. We need to
// use a 2-level mapping for this block of memory. Rumor has it
// that ARMv6's small brain cannot handle memory that is mapped by
// both a 1-level (section) entry and a 2-level page table at the
// same time. For the initial kernel we use 1MB mappings; other
// memory needs to be mapped as 4KB pages.
void paging_init (uint64 phy_low, uint64 phy_hi)
{
    mappages (P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_RW_1_0);
    flush_tlb ();
}
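For the 1MB mappings mentioned in the comment, the sketch below shows how first-level section descriptors could be filled in using the ARM short-descriptor format. map_sections(), its argument types, and the assumption that ap is a 2-bit AP field are illustrative only; mappages() in the example above presumably handles both this and the 4KB-page case.

/* Sketch only: install 1MiB section entries in a short-descriptor
 * first-level table. bits[31:20] = section base, bits[11:10] = AP,
 * bits[1:0] = 0b10 marks a section entry (domain 0, no XN). */
#define SECTION_SIZE (1u << 20)

static void map_sections(unsigned int *pgtbl, unsigned int va,
                         unsigned int pa, unsigned int len, unsigned int ap)
{
    unsigned int off;

    for (off = 0; off < len; off += SECTION_SIZE)
        pgtbl[(va + off) >> 20] = ((pa + off) & 0xFFF00000) | (ap << 10) | 0x2;
}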
Example #18
void do_xcpu_flush_tlb(cpu_mailbox_t * mailbox)
{
    flush_tlb();
    mailbox->set_status(MAILBOX_OK);
}