Example #1
void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

	if (mm != current->active_mm) {
		/* this does happen, but perhaps it's not worth optimizing for? */
#ifdef CONFIG_SMP
		flush_tlb_all();
#else
		mm->context = 0;
#endif
		return;
	}

	/* Pick a purge page size the ptc instructions support (purge.mask),
	 * starting near the size of the range and capped at purge.max_bits. */
	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	/* Align the start address down to the chosen purge granularity. */
	start &= ~((1UL << nbits) - 1);

# ifdef CONFIG_SMP
	platform_global_tlb_purge(start, end, nbits);
# else
	do {
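		/* Purge locally, one (1UL << nbits)-sized block per ptc.l. */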
		ia64_ptcl(start, (nbits<<2));
		start += (1UL << nbits);
	} while (start < end);
# endif

	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
Example #2
void
switch_to_physical_rid(VCPU *vcpu)
{
    u64 psr;
    u64 rr;

    switch (vcpu->arch.arch_vmx.mmu_mode) {
    case VMX_MMU_PHY_DT:
        rr = vcpu->arch.metaphysical_rid_dt;
        break;
    case VMX_MMU_PHY_D:
        rr = vcpu->arch.metaphysical_rid_d;
        break;
    default:
        panic_domain(NULL, "bad mmu mode value");
    }
    
    psr = ia64_clear_ic();
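    /* PSR.i/PSR.ic are now off: point rr0 and rr4 at the metaphysical RID. */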
    ia64_set_rr(VRN0<<VRN_SHIFT, rr);
    ia64_dv_serialize_data();
    ia64_set_rr(VRN4<<VRN_SHIFT, rr);
    ia64_srlz_d();
    
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
Example #3
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
	int l;

	/* Flush the range from the cache in 32-byte strides. */
	for (l = 0; l < (len + 32); l += 32)
		ia64_fc(start + l);

	/* Wait for the flushes to complete, then serialize instruction fetch. */
	ia64_sync_i();
	ia64_srlz_i();
}
Example #4
extern void
UTILITY_Clear_DCR_PP (
    void
)
{
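    /* Clear the PP bit in the Default Control Register (cr.dcr), which seeds
     * the default PSR.pp value on interruption, then serialize. */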
    ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
    ia64_srlz_i();

    return;
}
Example #5
extern void
UTILITY_Clear_PSR_UP (
    void
)
{
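    /* rsm clears the given PSR bits: drop PSR.up (user performance monitor
     * enable), then serialize the instruction stream. */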
    ia64_rsm(IA64_PSR_UP);
    ia64_srlz_i();

    return;
}
Example #6
extern void
UTILITY_Set_PSR_UP (
    void
)
{
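    /* ssm sets the given PSR bits: raise PSR.up (user performance monitor
     * enable), then serialize the instruction stream. */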
    ia64_ssm(IA64_PSR_UP);
    ia64_srlz_i();

    return;
}
Example #7
void
switch_to_virtual_rid(VCPU *vcpu)
{
    u64 psr;

    psr = ia64_clear_ic();
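    /* PSR.i/PSR.ic are now off: restore the saved rr0/rr4 values for
     * virtual mode. */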
    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
    ia64_dv_serialize_data();
    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
Example #8
void
ia64_sync_icache(vm_offset_t va, size_t sz)
{
	uintptr_t pa;
	size_t cnt, max;

	while (sz > 0) {
		max = sz;
		pa = (uintptr_t)ia64_va2pa(va, &max);
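		/* Flush this chunk's cache lines in 32-byte strides (fc.i keeps the
		 * i-cache coherent). */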
		for (cnt = 0; cnt < max; cnt += 32)
			ia64_fc_i(pa + cnt);
		ia64_sync_i();
		va += max;
		sz -= max;
	}
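	/* Serialize so subsequent instruction fetches see the flushed lines. */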
	ia64_srlz_i();
}
Example #9
void
ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
{
	static spinlock_t ptcg_lock = SPIN_LOCK_UNLOCKED;

	/* HW requires global serialization of ptc.ga.  */
	spin_lock(&ptcg_lock);
	{
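		/* Purge one (1UL << nbits)-sized block per iteration. */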
		do {
			/*
			 * Flush ALAT entries also.
			 */
			ia64_ptcga(start, (nbits<<2));
			ia64_srlz_i();
			start += (1UL << nbits);
		} while (start < end);
	}
	spin_unlock(&ptcg_lock);
}
Example #10
static void kvm_flush_tlb_all(void)
{
	unsigned long i, j, count0, count1, stride0, stride1, addr;
	long flags;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
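	/* Walk the ptc.e count/stride grid (as reported by PAL) to purge the
	 * entire local translation cache. */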
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
Example #11
static inline void vti_set_rr6(unsigned long rr6)
{
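	/* Install the new value into region register 6 and serialize the
	 * instruction stream. */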
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}
Example #12
void cacheline_flush(char * addr)
{
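    /* Flush the cache line containing addr; sync.i + srlz.i make the flush
     * visible to subsequent instruction fetches. */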
    ia64_fc(addr);
    ia64_sync_i();
    ia64_srlz_i();
}