Code example #1
void
switch_to_physical_rid(VCPU *vcpu)
{
    u64 psr;
    u64 rr;

    switch (vcpu->arch.arch_vmx.mmu_mode) {
    case VMX_MMU_PHY_DT:
        rr = vcpu->arch.metaphysical_rid_dt;
        break;
    case VMX_MMU_PHY_D:
        rr = vcpu->arch.metaphysical_rid_d;
        break;
    default:
        panic_domain(NULL, "bad mmu mode value");
    }
    
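    /*
     * Rewrite rr0 and rr4 with interrupts and interruption collection
     * disabled so no fault is taken while the two registers disagree.
     */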
    psr = ia64_clear_ic();
    ia64_set_rr(VRN0<<VRN_SHIFT, rr);
    ia64_dv_serialize_data();
    ia64_set_rr(VRN4<<VRN_SHIFT, rr);
    ia64_srlz_d();
    
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
Code example #2
static int
elf64_exec(struct preloaded_file *fp)
{
	struct file_metadata	*md;
	Elf_Ehdr		*hdr;
	struct ia64_pte		pte;
	struct bootinfo		*bi;

	if ((md = file_findmetadata(fp, MODINFOMD_ELFHDR)) == NULL)
		return(EFTYPE);			/* XXX actually EFUCKUP */
	hdr = (Elf_Ehdr *)&(md->md_data);

	/*
	 * Ugly hack, similar to linux. Dump the bootinfo into a
	 * special page reserved in the link map.
	 */
	bi = &bootinfo;
	bzero(bi, sizeof(struct bootinfo));
	bi_load(bi, fp);

	/*
	 * Region 6 is direct mapped UC and region 7 is direct mapped
	 * WC. The details of this are controlled by the Alt {I,D}TLB
	 * handlers. Here we just make sure that they have the largest
	 * possible page size to minimise TLB usage.
	 */
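	/*
	 * Region register layout: bit 0 is ve, bits 2-7 the page size,
	 * bits 8 and up the RID, so (28 << 2) selects 2^28-byte (256MB)
	 * pages.
	 */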
	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));

	bzero(&pte, sizeof(pte));
	pte.pte_p = 1;
	pte.pte_ma = PTE_MA_WB;
	pte.pte_a = 1;
	pte.pte_d = 1;
	pte.pte_pl = PTE_PL_KERN;
	pte.pte_ar = PTE_AR_RWX;
	pte.pte_ppn = 0;

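	/*
	 * Pin the region 7 mapping in instruction and data translation
	 * register slot 0; cr.ifa supplies the virtual address and
	 * cr.itir the page size for the itr inserts below.
	 */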
	__asm __volatile("mov cr.ifa=%0" :: "r"(IA64_RR_BASE(7)));
	__asm __volatile("mov cr.itir=%0" :: "r"(28 << 2));
	__asm __volatile("srlz.i;;");
	__asm __volatile("itr.i itr[%0]=%1;;"
			 :: "r"(0), "r"(*(u_int64_t*)&pte));
	__asm __volatile("srlz.i;;");
	__asm __volatile("itr.d dtr[%0]=%1;;"
			 :: "r"(0), "r"(*(u_int64_t*)&pte));
	__asm __volatile("srlz.i;;");

	enter_kernel(fp->f_name, hdr->e_entry, bi);
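	/* NOTREACHED */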
}
Code example #3
void
switch_to_virtual_rid(VCPU *vcpu)
{
    u64 psr;

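    /*
     * Restore the guest's saved virtual-mode values of rr0 and rr4
     * with interruption collection disabled.
     */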
    psr = ia64_clear_ic();
    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
    ia64_dv_serialize_data();
    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
Code example #4
File: exec.c  Project: oza/FreeBSD-7.3-dyntick
static int
elf64_exec(struct preloaded_file *fp)
{
    struct file_metadata	*md;
    Elf_Ehdr		*hdr;
    pt_entry_t		pte;
    uint64_t		bi_addr;

    md = file_findmetadata(fp, MODINFOMD_ELFHDR);
    if (md == NULL)
        return (EINVAL);
    hdr = (Elf_Ehdr *)&(md->md_data);

    bi_load(fp, &bi_addr);

    printf("Entering %s at 0x%lx...\n", fp->f_name, hdr->e_entry);

    ldr_enter(fp->f_name);

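    /*
     * Disable interrupts and interruption collection before tearing
     * down the translations the loader is running under.
     */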
    __asm __volatile("rsm psr.ic|psr.i;;");
    __asm __volatile("srlz.i;;");

    /*
     * Region 6 is direct mapped UC and region 7 is direct mapped
     * WC. The details of this are controlled by the Alt {I,D}TLB
     * handlers. Here we just make sure that they have the largest
     * possible page size to minimise TLB usage.
     */
    ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
    ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));

    pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
          PTE_PL_KERN | PTE_AR_RWX | PTE_ED;

    __asm __volatile("mov cr.ifa=%0" :: "r"(IA64_RR_BASE(7)));
    __asm __volatile("mov cr.itir=%0" :: "r"(28 << 2));
    __asm __volatile("ptr.i %0,%1" :: "r"(IA64_RR_BASE(7)), "r"(28<<2));
    __asm __volatile("ptr.d %0,%1" :: "r"(IA64_RR_BASE(7)), "r"(28<<2));
    __asm __volatile("srlz.i;;");
    __asm __volatile("itr.i itr[%0]=%1;;" :: "r"(0), "r"(pte));
    __asm __volatile("srlz.i;;");
    __asm __volatile("itr.d dtr[%0]=%1;;" :: "r"(0), "r"(pte));
    __asm __volatile("srlz.i;;");

    enter_kernel(hdr->e_entry, bi_addr);

    /* NOTREACHED */
    return (0);
}
Code example #5
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

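	/*
	 * tr_pages is a bitmask of the page sizes that can be inserted
	 * into the translation registers.
	 */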
	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

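	/* size is a power of two here, so __ffs() yields log2(size). */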
	hpage_shift = __ffs(size);
	/*
	 * The boot CPU already ran ia64_mmu_init with HPAGE_SHIFT_DEFAULT;
	 * override the huge-page region register here with the new shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
Code example #6
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
{
    u64 rrval;

    if (unlikely(is_reserved_rr_rid(vcpu, val))) {
        gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
        return IA64_RSVDREG_FAULT;
    }

    VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
    switch((u64)(reg>>VRN_SHIFT)) {
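    /*
     * rr7 maps the hypervisor itself, so it cannot simply be
     * rewritten here; vmx_switch_rr7 performs that switch safely.
     */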
    case VRN7:
        if (likely(vcpu == current))
            vmx_switch_rr7(vrrtomrr(vcpu,val),
                           (void *)vcpu->arch.vhpt.hash, pal_vaddr );
        break;
    case VRN4:
        rrval = vrrtomrr(vcpu,val);
        vcpu->arch.metaphysical_saved_rr4 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg,rrval);
        break;
    case VRN0:
        rrval = vrrtomrr(vcpu,val);
        vcpu->arch.metaphysical_saved_rr0 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg,rrval);
        break;
    default:
        if (likely(vcpu == current))
            ia64_set_rr(reg,vrrtomrr(vcpu,val));
        break;
    }

    return (IA64_NO_FAULT);
}
Code example #7
void
vmx_load_all_rr(VCPU *vcpu)
{
	unsigned long rr0, rr4;

	switch (vcpu->arch.arch_vmx.mmu_mode) {
	case VMX_MMU_VIRTUAL:
		rr0 = vcpu->arch.metaphysical_saved_rr0;
		rr4 = vcpu->arch.metaphysical_saved_rr4;
		break;
	case VMX_MMU_PHY_DT:
		rr0 = vcpu->arch.metaphysical_rid_dt;
		rr4 = vcpu->arch.metaphysical_rid_dt;
		break;
	case VMX_MMU_PHY_D:
		rr0 = vcpu->arch.metaphysical_rid_d;
		rr4 = vcpu->arch.metaphysical_rid_d;
		break;
	default:
		panic_domain(NULL, "bad mmu mode value");
	}

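	/*
	 * Reload rr0-rr6, serializing between successive writes; rr7
	 * maps Xen itself and is switched via vmx_switch_rr7_vcpu.
	 */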
	ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
	ia64_dv_serialize_data();
	ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
	ia64_dv_serialize_data();
	ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
	ia64_dv_serialize_data();
	vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])));
	ia64_set_pta(VMX(vcpu, mpta));
	vmx_ia64_set_dcr(vcpu);

	ia64_srlz_d();
}
Code example #8
File: kvm-ia64.c  Project: mpalmer/linux-2.6
static inline void vti_set_rr6(unsigned long rr6)
{
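	/* Install the guest's rr6 and serialize the instruction stream
	 * so that subsequent fetches see the new mapping. */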
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}