Example #1
/*  Restore the processor after a set_mtrr_prepare  */
void set_mtrr_done(struct set_mtrr_context *ctxt)
{
    if (use_intel() || is_cpu(CYRIX)) {

        /*  Flush caches and TLBs  */
        wbinvd();

        /*  Restore MTRRdefType  */
        if (use_intel())
            /* Intel (P6) standard MTRRs */
            mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
        else
            /* Cyrix ARRs - everything else was excluded at the top */
            setCx86(CX86_CCR3, ctxt->ccr3);

        /*  Enable caches  */
        write_cr0(read_cr0() & 0xbfffffff);

        /*  Restore value of CR4  */
        if ( cpu_has_pge )
            write_cr4(ctxt->cr4val);
    }
    /*  Re-enable interrupts locally (if enabled previously)  */
    local_irq_restore(ctxt->flags);
}
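
The magic constants in this pair of functions toggle the cache-disable (CD) bit, bit 30 of CR0: 0x40000000 sets it and the 0xbfffffff mask clears it. A minimal sketch of the same operations using a symbolic name (X86_CR0_CD is assumed here, by analogy with the X86_CR4_PGE constant used below; it is not defined in this excerpt):

/* Sketch only: equivalent cache toggling with a symbolic CD bit. */
#define X86_CR0_CD 0x40000000   /* assumed name for CR0 bit 30 */

static inline void cache_disable_sketch(void)
{
    write_cr0(read_cr0() | X86_CR0_CD);    /* set CD: stop cache fills */
    wbinvd();                              /* flush caches (and TLBs) */
}

static inline void cache_enable_sketch(void)
{
    wbinvd();                              /* flush before re-enabling */
    write_cr0(read_cr0() & ~X86_CR0_CD);   /* clear CD: enable caches */
}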
Example #2
/*  Put the processor into a state where MTRRs can be safely set  */
void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
{
    unsigned int cr0;

    /*  Disable interrupts locally  */
    local_irq_save(ctxt->flags);

    if (use_intel() || is_cpu(CYRIX)) {

        /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
        if ( cpu_has_pge ) {
            ctxt->cr4val = read_cr4();
            write_cr4(ctxt->cr4val & ~X86_CR4_PGE);
        }

        /*  Disable and flush caches. Note that wbinvd flushes the TLBs as
            a side-effect  */
        cr0 = read_cr0() | 0x40000000;
        wbinvd();
        write_cr0(cr0);
        wbinvd();

        if (use_intel())
            /*  Save MTRR state */
            rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
        else
            /* Cyrix ARRs - everything else was excluded at the top */
            ctxt->ccr3 = getCx86(CX86_CCR3);
    }
}
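
set_mtrr_prepare_save() and set_mtrr_done() are meant to bracket the actual MTRR writes. A hypothetical caller might look roughly like the following; MTRRphysBase_MSR()/MTRRphysMask_MSR() are assumed helpers for the 0x200 + 2*reg and 0x201 + 2*reg MSR pair and are not part of this excerpt:

/* Illustrative only: program one variable-range MTRR inside the
   prepare/done bracket. */
static void set_one_mtrr_sketch(unsigned int reg,
                                u32 base_lo, u32 base_hi,
                                u32 mask_lo, u32 mask_hi)
{
    struct set_mtrr_context ctxt;

    set_mtrr_prepare_save(&ctxt);    /* irqs off, caches off, state saved */
    mtrr_wrmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
    mtrr_wrmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
    set_mtrr_done(&ctxt);            /* caches, CR4 and irqs restored */
}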
Example #3
static void __init init_other_cpus(void)
{
	if (use_intel())
		get_mtrr_state();

	/* bring up the other processors */
	set_mtrr(~0U,0,0,0);

	if (use_intel()) {
		finalize_mtrr_state();
		mtrr_state_warn();
	}
}
void mtrr_bp_restore(void)
{
	if (!use_intel())
		return;

	mtrr_if->set_all();
}
void set_mtrr_aps_delayed_init(void)
{
	if (!use_intel())
		return;

	mtrr_aps_delayed_init = true;
}
/*
 * MTRR initialization for all APs
 */
void mtrr_aps_init(void)
{
	if (!use_intel())
		return;

	set_mtrr(~0U, 0, 0, 0);
	mtrr_aps_delayed_init = false;
}
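
set_mtrr_aps_delayed_init(), mtrr_ap_init() and mtrr_aps_init() cooperate through the mtrr_aps_delayed_init flag, and set_mtrr(~0U, 0, 0, 0) is the "copy the saved MTRR state to every CPU" rendezvous rather than a write to one register. A hypothetical boot-path caller (the function name below is illustrative) would sequence them roughly as:

/* Illustrative sequence only; the caller name is hypothetical. */
static void bring_up_secondary_cpus_sketch(void)
{
	set_mtrr_aps_delayed_init();	/* make mtrr_ap_init() return early */

	/* ... start the APs; each one skips its own MTRR programming ... */

	mtrr_aps_init();		/* one rendezvous programs all APs */
}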
Example #7
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase;
	unsigned int lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	lock_cpu_hotplug();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/*  Search for existing MTRR  */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
			       size);
			goto out;
		}
	}
	if (reg >= max) {
		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
		goto out;
	}
	if (is_cpu(CYRIX) && !use_intel()) {
		if ((reg == 3) && arr3_protected) {
			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
			goto out;
		}
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (usage_table[reg] < 1) {
		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	unlock_cpu_hotplug();
	return error;
}
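
mtrr_del_page() takes base and size in pages (hence the "%lx000" in the printk), and a negative reg asks it to search for a matching entry rather than delete by index. A hypothetical wrapper showing that calling convention:

/* Illustrative only: delete a region by address instead of by index.
 * base_pfn and size_pfn are in pages, matching mtrr_del_page()'s units. */
static int mtrr_del_by_addr_sketch(unsigned long base_pfn, unsigned long size_pfn)
{
	return mtrr_del_page(-1, base_pfn, size_pfn);
}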
Example #8
void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
{
    if (use_intel())
        /*  Disable MTRRs, and set the default type to uncached  */
        mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL,
                   ctxt->deftype_hi);
    else if (is_cpu(CYRIX))
        /* Cyrix ARRs - everything else was excluded at the top */
        setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
}
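
The 0xf300UL mask clears the MTRR enable bit (bit 11), the fixed-range enable bit (bit 10) and the default memory type field (bits 7:0), i.e. it disables the MTRRs and makes the default type uncached. A sketch with assumed symbolic names (not defined in this excerpt; reserved bits above bit 15 are zero in practice, so the result matches the literal mask):

#define MTRR_DEF_TYPE_E    (1UL << 11)   /* assumed: MTRR enable */
#define MTRR_DEF_TYPE_FE   (1UL << 10)   /* assumed: fixed-range enable */
#define MTRR_DEF_TYPE_TYPE 0xffUL        /* assumed: default type field */

/* Same effect as (deftype_lo & 0xf300UL) for valid MTRRdefType values. */
static inline unsigned long mtrr_disable_deftype_sketch(unsigned long deftype_lo)
{
    return deftype_lo & ~(MTRR_DEF_TYPE_E | MTRR_DEF_TYPE_FE | MTRR_DEF_TYPE_TYPE);
}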
Example #9
/*  This function sets num_var_ranges to the number of variable MTRRs  */
void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel()) {
		rdmsr(MTRRcap_MSR, config, dummy);
	} else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;
	num_var_ranges = config & 0xff;
}
Example #10
/*
 * Delayed MTRR initialization for all APs
 */
void mtrr_aps_init(void)
{
	if (!use_intel())
		return;

	/*
	 * Check if someone has requested delaying AP MTRR initialization by
	 * calling set_mtrr_aps_delayed_init() prior to this point. If not,
	 * we are done.
	 */
	if (!mtrr_aps_delayed_init)
		return;

	set_mtrr(~0U, 0, 0, 0);
	mtrr_aps_delayed_init = false;
}
Example #11
static int __init mtrr_init_finialize(void)
{
	if (!mtrr_if)
		return 0;
	if (use_intel())
		mtrr_state_warn();
	else {
		/* These CPUs have no MTRRs and seem not to support SMP. They
		 * have specific drivers; we use a tricky method to support
		 * suspend/resume for them.
		 * TBD: is there any system with such a CPU which supports
		 * suspend/resume? If not, we should remove the code.
		 */
		sysdev_driver_register(&cpu_sysdev_class,
			&mtrr_sysdev_driver);
	}
	return 0;
}
void mtrr_ap_init(void)
{
	if (!use_intel() || mtrr_aps_delayed_init)
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries
	 * being changed, but this routine is called at cpu boot time, and
	 * holding the lock would break it.
	 *
	 * This routine is called in two cases:
	 *
	 *   1. very early in software resume, when there are absolutely
	 *      no mtrr entry changes;
	 *
	 *   2. cpu hotadd time. We let mtrr_add/del_page hold the
	 *      cpuhotplug lock to prevent mtrr entry changes.
	 */
	set_mtrr(~0U, 0, 0, 0);
}
Example #13
void mtrr_ap_init(void)
{
	unsigned long flags;

	if (!mtrr_if || !use_intel())
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries being
	 * changed, but this routine is called at cpu boot time, and holding
	 * the lock would break it. This routine is called in two cases:
	 * 1. very early in software resume, when there are absolutely no
	 *    mtrr entry changes;
	 * 2. cpu hotadd time. We let mtrr_add/del_page hold the cpuhotplug
	 *    lock to prevent mtrr entry changes.
	 */
	local_irq_save(flags);

	mtrr_if->set_all();

	local_irq_restore(flags);
}
static int __init mtrr_init_finialize(void)
{
	if (!mtrr_if)
		return 0;

	if (use_intel()) {
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
		return 0;
	}

	/*
	 * These CPUs have no MTRRs and seem not to support SMP. They have
	 * specific drivers; we use a tricky method to support
	 * suspend/resume for them.
	 *
	 * TBD: is there any system with such a CPU which supports
	 * suspend/resume? If not, we should remove the code.
	 */
	sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);

	return 0;
}
/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 *
 */
void __init mtrr_bp_init(void)
{
	u32 phys_addr;

	init_ifs();

	phys_addr = 32;

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;			/* 36 bits */
		size_and_mask = 0x00f00000;
		phys_addr = 36;

		/*
		 * This is an AMD-specific CPUID leaf, but we assume (hope?)
		 * that Intel will implement it too when they extend the
		 * address bus of the Xeon.
		 */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_mask == 0x3 ||
			     boot_cpu_data.x86_mask == 0x4))
				phys_addr = 36;

			size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
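			/*
			 * Worked example (assuming PAGE_SHIFT == 12): with
			 * phys_addr == 36 there are 24 valid PFN bits, so
			 * size_or_mask keeps PFN bits 24 and up (0xff000000
			 * in 32 bits) and size_and_mask == 0x00f00000,
			 * matching the 36-bit defaults assigned above.
			 */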
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/*
			 * The VIA C* family has Intel-style MTRRs,
			 * but doesn't support PAE
			 */
			size_or_mask = 0xfff00000;		/* 32 bits */
			size_and_mask = 0;
			phys_addr = 32;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
		if (use_intel()) {
			get_mtrr_state();

			if (mtrr_cleanup(phys_addr)) {
				changed_by_mtrr_cleanup = 1;
				mtrr_if->set_all();
			}
		}
	}
}