Example #1
/* May not be __init: called during resume */
void syscall32_cpu_init(void)
{
    /* Load these always in case some future AMD CPU supports
       SYSENTER from compat mode too. */
    checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
    checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
    checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

    wrmsrl(MSR_CSTAR, ia32_cstar_target);
}
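All six examples revolve around the same primitive: checking_wrmsrl() is the fault-tolerant counterpart of wrmsrl(). A plain wrmsrl() to an MSR the CPU (or a simulator) does not implement raises a #GP fault and takes the kernel down, while the checking variant traps the fault and reports it as a non-zero return value. As a point of reference, older x86-64 kernels defined it roughly as the sketch below; the exact definition varies across kernel versions, so treat this as illustrative:

/* Sketch, not authoritative: checking_wrmsrl() as a thin macro over
 * wrmsr_safe(), which catches the #GP fault from a write to an
 * unimplemented MSR and turns it into a non-zero return value. */
#define checking_wrmsrl(msr, val) \
	wrmsr_safe((msr), (u32)(val), (u32)((val) >> 32))

Example #1 relies on exactly this behavior: the SYSENTER MSRs are written through the checking variant so a CPU or simulator that faults on them does not crash the kernel, while MSR_CSTAR, which is architectural on every x86-64 CPU, can use plain wrmsrl().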
Example #2
/* May not be __init: called during resume */
void syscall32_cpu_init(void)
{
	if (use_sysenter < 0)
 		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);

	/* Load these always in case some future AMD CPU supports
	   SYSENTER from compat mode too. */
	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

	wrmsrl(MSR_CSTAR, ia32_cstar_target);
}
Example #3
static void setup_k7_watchdog(void)
{
	int i;
	unsigned int evntsel;

	/* No check, so can start with slow frequency */
	nmi_hz = 1; 

	/* XXX should check these in EFER */

	nmi_perfctr_msr = MSR_K7_PERFCTR0;

	for(i = 0; i < 4; ++i) {
		/* Simulator may not support it */
		if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL))
			return;
		wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
	}

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
	wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz*1000) / nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
Example #4
static void setup_k7_watchdog(void)
{
	int i;
	unsigned int evntsel;

	nmi_perfctr_msr = MSR_K7_PERFCTR0;

	for(i = 0; i < 4; ++i) {
		/* Simulator may not support it */
		if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL)) {
			nmi_perfctr_msr = 0;
			return;
		}
		wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
	}

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
	wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
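Two details are worth noting in the watchdog examples above. First, the perfctr is preloaded with a negative value: the counter counts up and raises an NMI on overflow, so a preload of -(cpu_khz * 1000 / nmi_hz) cycles makes it fire nmi_hz times per second. Second, the parenthesization differs between Examples #3 and #4: in unsigned arithmetic, -((u64)cpu_khz*1000) / nmi_hz divides the already-wrapped negation and is only correct because Example #3 pins nmi_hz to 1, whereas Example #4 negates after dividing and is correct for any nmi_hz. A standalone sketch of the arithmetic (the 2 GHz clock is an assumed figure for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_khz = 2000000;  /* assumed: a 2 GHz CPU   */
	unsigned int nmi_hz = 4;     /* four NMIs per second   */

	/* Negate after dividing, as in Example #4: the counter
	 * overflows after cpu_khz * 1000 / nmi_hz cycles, i.e.
	 * every 1/nmi_hz seconds. */
	uint64_t preload = -((uint64_t)cpu_khz * 1000 / nmi_hz);

	printf("preload = %#llx\n", (unsigned long long)preload);
	return 0;
}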
Example #5
static void p6_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	u64 val;

	val = hwc->config;
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;

	(void)checking_wrmsrl(hwc->config_base, val);
}
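Example #5 shows the opposite error-handling convention from the restore code that follows: when a failed write is harmless, the return value of checking_wrmsrl() is explicitly discarded with a (void) cast to document that the failure is deliberately ignored, instead of being propagated to the caller as in Example #6.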
Example #6
int vmadump_restore_cpu(cr_rstrt_proc_req_t *ctx, struct file *file,
			struct pt_regs *regs) {
    struct vmadump_restore_tmps *x86tmps;
    struct thread_struct *threadtmp;
    struct pt_regs *regtmp;
    int r;
    int idx, i, cpu;
    uint16_t fsindex, gsindex;
#if HAVE_STRUCT_N_DESC_STRUCT
    struct n_desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
#else
    struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
#endif

    /* XXX: Note allocation assumes i387tmp and threadtmp are never active at the same time */
    x86tmps = kmalloc(sizeof(*x86tmps), GFP_KERNEL);
    if (!x86tmps) return -ENOMEM;
    regtmp = VMAD_REGTMP(x86tmps);
    threadtmp = VMAD_THREADTMP(x86tmps);

    r = read_kern(ctx, file, regtmp, sizeof(*regtmp));
    if (r != sizeof(*regtmp)) goto bad_read;

    /* Don't let the user pick funky segments */
    if ((regtmp->cs != __USER_CS && regtmp->cs != __USER32_CS) &&
	(regtmp->ss != __USER_DS && regtmp->ss != __USER32_DS)) {
	r = -EINVAL;
	goto bad_read;
    }

    /* Set our process type */
    if (regtmp->cs == __USER32_CS)
	set_thread_flag(TIF_IA32);
    else
	clear_thread_flag(TIF_IA32);	

    /* Only restore bottom 9 bits of eflags.  Restoring anything else
     * is bad bad mojo for security. (0x200 = interrupt enable) */
#if HAVE_PT_REGS_EFLAGS
    regtmp->eflags = 0x200 | (regtmp->eflags & 0x000000FF);
#elif HAVE_PT_REGS_FLAGS
    regtmp->flags = 0x200 | (regtmp->flags & 0x000000FF);
#else
    #error
#endif
    memcpy(regs, regtmp, sizeof(*regtmp));

    /* Restore FPU info (and later general "extended state") */
    r = vmadump_restore_i387(ctx, file, VMAD_I387TMP(x86tmps));
    if (r < 0) goto bad_read;
	
    /* XXX FIX ME: RESTORE DEBUG INFORMATION ?? */
    /* Here we read it but ignore it. */
    r = vmadump_restore_debugreg(ctx, file);
    if (r < 0) goto bad_read;

    /* user(r)sp, since we don't use the ptrace entry path in BLCR */
#if HAVE_THREAD_USERSP
    r = read_kern(ctx, file, &threadtmp->usersp, sizeof(threadtmp->usersp));
    if (r != sizeof(threadtmp->usersp)) goto bad_read;
    current->thread.usersp = threadtmp->usersp;
    vmad_write_oldrsp(threadtmp->usersp);
#elif HAVE_THREAD_USERRSP
    r = read_kern(ctx, file, &threadtmp->userrsp, sizeof(threadtmp->userrsp));
    if (r != sizeof(threadtmp->userrsp)) goto bad_read;
    current->thread.userrsp = threadtmp->userrsp;
    vmad_write_oldrsp(threadtmp->userrsp);
#else
    #error
#endif

    /*-- restore segmentation related stuff */

    /* Restore FS_BASE MSR */
    r = read_kern(ctx, file, &threadtmp->fs, sizeof(threadtmp->fs));
    if (r != sizeof(threadtmp->fs)) goto bad_read;
    if (threadtmp->fs >= TASK_SIZE) {
	r = -EINVAL;
	goto bad_read;
    }
    current->thread.fs = threadtmp->fs;
    if ((r = checking_wrmsrl(MSR_FS_BASE, threadtmp->fs)))
	goto bad_read;
	
    /* Restore GS_KERNEL_BASE MSR */
    r = read_kern(ctx, file, &threadtmp->gs, sizeof(threadtmp->gs));
    if (r != sizeof(threadtmp->gs)) goto bad_read;
    if (threadtmp->gs >= TASK_SIZE) {
	r = -EINVAL;
	goto bad_read;
    }
    current->thread.gs = threadtmp->gs;
    if ((r = checking_wrmsrl(MSR_KERNEL_GS_BASE, threadtmp->gs)))
	goto bad_read;

    /* Restore 32 bit segment stuff */
    r = read_kern(ctx, file, &fsindex, sizeof(fsindex));
    if (r != sizeof(fsindex)) goto bad_read;

    r = read_kern(ctx, file, &gsindex, sizeof(gsindex));
    if (r != sizeof(gsindex)) goto bad_read;

    r = read_kern(ctx, file, tls_array, sizeof(tls_array));
    if (r != sizeof(tls_array)) goto bad_read;

    /* Sanitize fs, gs.  These segment descriptors should load one
     * of the TLS entries and have DPL = 3.  If somebody is doing
     * some other LDT monkey business, I'm currently not
     * supporting that here.  Also, I'm presuming that the offsets
     * to the GDT_ENTRY_TLS_MIN is the same in both kernels. */
    idx = fsindex >> 3;
    if (idx<GDT_ENTRY_TLS_MIN || idx>GDT_ENTRY_TLS_MAX || (fsindex&7) != 3)
	fsindex = 0;
    idx = gsindex >> 3;
    if (idx<GDT_ENTRY_TLS_MIN || idx>GDT_ENTRY_TLS_MAX || (gsindex&7) != 3)
	gsindex = 0;

    /* Sanitize the TLS entries...
     * Make sure the following bits are set/not set:
     *  bit 12   : S    =  1    (code/data - not system)
     *  bit 13-14: DPL  = 11    (priv level = 3 (user))
     *  bit 21   :      =  0    (reserved)
     *
     * If the entry isn't valid, zero the whole descriptor.
     */
    for (i=0; i < GDT_ENTRY_TLS_ENTRIES; i++) {
	if (tls_array[i].b != 0 && 
	    (tls_array[i].b & 0x00207000) != 0x00007000) {
	    r = -EINVAL;
	    goto bad_read;
	}
    }

    /* Ok load this crap */
    cpu = get_cpu();	/* load_TLS can't get pre-empted. */
    memcpy(current->thread.tls_array, tls_array,
	   sizeof(current->thread.tls_array));
    current->thread.fsindex = fsindex;
    current->thread.gsindex = gsindex;
    load_TLS(&current->thread, cpu);

    loadsegment(fs, current->thread.fsindex);
    load_gs_index(current->thread.gsindex);
    put_cpu();

    /* In case cr_restart and child don't have same ABI */
    if (regtmp->cs == __USER32_CS) {
	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);
    } else {
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);
    }

#if HAVE_THREAD_INFO_SYSENTER_RETURN
    {
	void *sysenter_return;
	r = read_kern(ctx, file, &sysenter_return, sizeof(sysenter_return));
	if (r != sizeof(sysenter_return)) goto bad_read;
	current_thread_info()->sysenter_return = sysenter_return;
    }
#endif
    
    kfree(x86tmps);
    return 0;

 bad_read:
    kfree(x86tmps);
    if (r >= 0) r = -EIO;
    return r;
}
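Note the pattern throughout Example #6: checking_wrmsrl() only reports whether the MSR write itself faulted, so every value read from the checkpoint file is validated before it is loaded. The code and stack selectors are restricted to the __USER*_CS/__USER*_DS values, the FS and GS bases are bounded by TASK_SIZE, and the TLS descriptors are checked for DPL 3 before any of them reach a segment register or MSR.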