Example #1
/*
 * Duplicate parent state in child
 * for U**X fork.
 */
kern_return_t
machine_thread_dup(
    thread_t		parent,
    thread_t		child
)
{
	pcb_t		parent_pcb;
	pcb_t		child_pcb;

	if ((child_pcb = child->machine.pcb) == NULL ||
	    (parent_pcb = parent->machine.pcb) == NULL)
		return (KERN_FAILURE);
	/*
	 * Copy over the x86_saved_state registers
	 */
	if (cpu_mode_is64bit()) {
		if (thread_is_64bit(parent))
			bcopy(USER_REGS64(parent), USER_REGS64(child), sizeof(x86_saved_state64_t));
		else
			bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state_compat32_t));
	} else
		bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state32_t));

	/*
	 * Check to see if parent is using floating point
	 * and if so, copy the registers to the child
	 */
	fpu_dup_fxstate(parent, child);

#ifdef	MACH_BSD
	/*
	 * Copy the parent's cthread id and USER_CTHREAD descriptor, if 32-bit.
	 */
	child_pcb->cthread_self = parent_pcb->cthread_self;
	if (!thread_is_64bit(parent))
		child_pcb->cthread_desc = parent_pcb->cthread_desc;

	/*
	 * FIXME - should user-specified LDT, TSS and V86 info
	 * be duplicated as well? Probably not.
	 */
	// Duplicate any user LDT entry that was set; this seems appropriate.
	if (parent_pcb->uldt_selector != 0) {
		child_pcb->uldt_selector = parent_pcb->uldt_selector;
		child_pcb->uldt_desc = parent_pcb->uldt_desc;
	}
#endif

	return (KERN_SUCCESS);
}
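For context: machine_thread_dup() is the machine-dependent half of fork-time thread duplication, invoked from the machine-independent thread_dup() path. A minimal sketch of such a caller follows; the wrapper and its body are illustrative only, not the actual XNU implementation.

/*
 * Hedged sketch of a fork-path caller. Only machine_thread_dup()
 * above is real; this wrapper is hypothetical.
 */
kern_return_t
thread_dup_sketch(thread_t parent, thread_t child)
{
	if (parent == THREAD_NULL || child == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);
	/* Copies saved registers, FPU state, cthread id, and user LDT. */
	return (machine_thread_dup(parent, child));
}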
Example #2
void
kdp_getstate(
    x86_thread_state32_t	*state
)
{
    static x86_thread_state32_t	null_state;
    x86_saved_state32_t	*saved_state;
    
    saved_state = (x86_saved_state32_t *)kdp.saved_state;
    
    *state = null_state;	
    state->eax = saved_state->eax;
    state->ebx = saved_state->ebx;
    state->ecx = saved_state->ecx;
    state->edx = saved_state->edx;
    state->edi = saved_state->edi;
    state->esi = saved_state->esi;
    state->ebp = saved_state->ebp;

    if ((saved_state->cs & SEL_PL) == SEL_PL_K) { /* Kernel state? */
	    if (cpu_mode_is64bit())
		    state->esp = (uint32_t) saved_state->uesp;
	    else
		    state->esp = ((uint32_t)saved_state) + offsetof(x86_saved_state_t, ss_32) + sizeof(x86_saved_state32_t);
	    state->ss = KERNEL_DS;
    } else {
	    state->esp = saved_state->uesp;
	    state->ss = saved_state->ss;
    }

    state->eflags = saved_state->efl;
    state->eip = saved_state->eip;
    state->cs = saved_state->cs;
    state->ds = saved_state->ds;
    state->es = saved_state->es;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}
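kdp_getstate() normalizes a saved 32-bit trap frame into the exported x86_thread_state32_t layout. A hedged sketch of how a KDP "read registers" handler might consume it; the handler name and plumbing are hypothetical.

/*
 * Illustrative only: kdp_getstate() is real, the surrounding
 * request handler is hypothetical.
 */
static void
kdp_readregs_sketch(x86_thread_state32_t *out_state)
{
	x86_thread_state32_t state;

	kdp_getstate(&state);	/* canonicalize the saved trap frame */
	*out_state = state;	/* return it in the exported layout */
}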
Example #3
void
mca_dump(void)
{
	mca_state_t	*mca_state = current_cpu_datap()->cpu_mca_state;
	uint64_t	deadline;
	unsigned int	i = 0;

	/*
	 * Capture local MCA registers to per-cpu data.
	 */
	mca_save_state(mca_state);

	/*
	 * Serialize: the first caller controls dumping MCA registers,
	 * other threads spin meantime.
	 */
	simple_lock(&mca_lock);
	if (mca_dump_state > CLEAR) {
		simple_unlock(&mca_lock);
		while (mca_dump_state == DUMPING)
			cpu_pause();
		return;
	}
	mca_dump_state = DUMPING;
	simple_unlock(&mca_lock);

	/*
	 * Wait for all other hardware threads to save their state.
	 * Or timeout.
	 */
	deadline = mach_absolute_time() + LockTimeOut;
	while (mach_absolute_time() < deadline && i < real_ncpus) {
		if (!cpu_datap(i)->cpu_mca_state->mca_is_saved) {
			cpu_pause();
			continue;
		}
		i += 1;
	}

	/*
	 * Report machine-check capabilities:
	 */
	kdb_printf(
		"Machine-check capabilities 0x%016qx:\n", ia32_mcg_cap.u64);

	mca_report_cpu_info();

	kdb_printf(
		" %d error-reporting banks\n%s%s%s", mca_error_bank_count,
		IF(mca_control_MSR_present,
		   " control MSR present\n"),
		IF(mca_threshold_status_present,
		   " threshold-based error status present\n"),
		IF(mca_cmci_present,
		   " extended corrected memory error handling present\n"));
	if (mca_extended_MSRs_present)
		kdb_printf(
			" %d extended MSRs present\n", mca_extended_MSRs_count);
 
	/*
	 * Dump all processor state:
	 */
	for (i = 0; i < real_ncpus; i++) {
		mca_state_t		*mcsp = cpu_datap(i)->cpu_mca_state;
		ia32_mcg_status_t	status;

		kdb_printf("Processor %d: ", i);
		if (mcsp == NULL ||
		    mcsp->mca_is_saved == FALSE ||
		    mcsp->mca_mcg_status.u64 == 0) {
			kdb_printf("no machine-check status reported\n");
			continue;
		}
		if (!mcsp->mca_is_valid) {
			kdb_printf("no valid machine-check state\n");
			continue;
		}
		status = mcsp->mca_mcg_status;
		kdb_printf(
			"machine-check status 0x%016qx:\n%s%s%s", status.u64,
			IF(status.bits.ripv, " restart IP valid\n"),
			IF(status.bits.eipv, " error IP valid\n"),
			IF(status.bits.mcip, " machine-check in progress\n"));

		mca_cpu_dump_error_banks(mcsp);
	}

	/*
	 * Dump any extended machine state:
	 */
	if (mca_extended_MSRs_present) {
		if (cpu_mode_is64bit())
			mca_dump_64bit_state();
		else
			mca_dump_32bit_state();
	}

	/* Update state to release any other threads. */
	mca_dump_state = DUMPED;
}
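The simple_lock/state-variable idiom above is a one-shot gate: the first caller transitions CLEAR to DUMPING and does the work, while latecomers spin until DUMPED. A distilled sketch of the same idiom, with hypothetical names:

/*
 * One-shot gate distilled from mca_dump() above. All names here are
 * hypothetical; the real code uses mca_lock/mca_dump_state.
 */
typedef enum { GATE_CLEAR, GATE_RUNNING, GATE_DONE } gate_state_t;

static gate_state_t	gate_state = GATE_CLEAR;
decl_simple_lock_data(static, gate_lock)

static boolean_t
gate_try_enter(void)
{
	simple_lock(&gate_lock);
	if (gate_state > GATE_CLEAR) {
		simple_unlock(&gate_lock);
		while (gate_state == GATE_RUNNING)
			cpu_pause();	/* spin until the owner finishes */
		return FALSE;		/* work already done elsewhere */
	}
	gate_state = GATE_RUNNING;
	simple_unlock(&gate_lock);
	return TRUE;			/* caller owns the one-shot work */
}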
Example #4
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);
	
	switch (cpu_info.vector_unit) {
		case 9:
			bits |= kHasAVX1_0;
			/* fall thru */
		case 8:
			bits |= kHasSSE4_2;
			/* fall thru */
		case 7:
			bits |= kHasSSE4_1;
			/* fall thru */
		case 6:
			bits |= kHasSupplementalSSE3;
			/* fall thru */
		case 5:
			bits |= kHasSSE3;
			/* fall thru */
		case 4:
			bits |= kHasSSE2;
			/* fall thru */
		case 3:
			bits |= kHasSSE;
			/* fall thru */
		case 2:
			bits |= kHasMMX;
		default:
			break;
	}
	switch (cpu_info.cache_line_size) {
		case 128:
			bits |= kCache128;
			break;
		case 64:
			bits |= kCache64;
			break;
		case 32:
			bits |= kCache32;
			break;
		default:
			break;
	}
	cpus = commpage_cpus();			// how many CPUs do we have

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;	// we use %gs for TLS

#define setif(_bits, _bit, _condition) \
	if (_condition) _bits |= _bit

	setif(bits, kUP,         cpus == 1);
	setif(bits, k64Bit,      cpu_mode_is64bit());
	setif(bits, kSlow,       tscFreq <= SLOW_TSC_THRESHOLD);

	setif(bits, kHasAES,     cpuid_features() &
					CPUID_FEATURE_AES);
	setif(bits, kHasF16C,    cpuid_features() &
					CPUID_FEATURE_F16C);
	setif(bits, kHasRDRAND,  cpuid_features() &
					CPUID_FEATURE_RDRAND);
	setif(bits, kHasFMA,     cpuid_features() &
					CPUID_FEATURE_FMA);

	setif(bits, kHasBMI1,    cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI1);
	setif(bits, kHasBMI2,    cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI2);
	setif(bits, kHasRTM,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_RTM);
	setif(bits, kHasHLE,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_HLE);
	setif(bits, kHasAVX2_0,  cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_AVX2);
	/* RDSEED and ADX are leaf-7 features; query the leaf-7 word. */
	setif(bits, kHasRDSEED,  cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_RDSEED);
	setif(bits, kHasADX,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_ADX);
	
	setif(bits, kHasMPX,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_MPX);
	setif(bits, kHasSGX,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_SGX);
	uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
	setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
				 (cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_ERMS));
	
	_cpu_capabilities = bits;		// set kernel version for use by drivers etc
}
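_cpu_capabilities packs the feature bits together with the CPU count in the kNumCPUs field. A hedged sketch of decoding the count back out; kNumCPUs and kNumCPUsShift are assumed to come from cpu_capabilities.h, and the helper itself is illustrative:

/*
 * Sketch: recover the CPU count packed above. The kNumCPUs mask is
 * assumed from cpu_capabilities.h; this helper is not part of XNU.
 */
static int
capabilities_cpu_count(uint64_t caps)
{
	return (int)((caps & kNumCPUs) >> kNumCPUsShift);
}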
Example #5
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	switch (cpu_info.vector_unit) {
		case 9:
			bits |= kHasAVX1_0;
			/* fall thru */
		case 8:
			bits |= kHasSSE4_2;
			/* fall thru */
		case 7:
			bits |= kHasSSE4_1;
			/* fall thru */
		case 6:
			bits |= kHasSupplementalSSE3;
			/* fall thru */
		case 5:
			bits |= kHasSSE3;
			/* fall thru */
		case 4:
			bits |= kHasSSE2;
			/* fall thru */
		case 3:
			bits |= kHasSSE;
			/* fall thru */
		case 2:
			bits |= kHasMMX;
		default:
			break;
	}
	switch (cpu_info.cache_line_size) {
		case 128:
			bits |= kCache128;
			break;
		case 64:
			bits |= kCache64;
			break;
		case 32:
			bits |= kCache32;
			break;
		default:
			break;
	}
	cpus = commpage_cpus();			// how many CPUs do we have

	/** Sinetek: by default we'd like some reasonable values,
	 **  so that userspace runs correctly.
	 **
	 ** On Mountain Lion, kHasSSE4_2 selects the vanilla SSE2 routines.
	 ** On Mavericks, we need a bit more support: SSE3, supplemental SSE3.
	 **/
	if (IsAmdCPU()) {
		bits |= kHasSSE4_2;
		bits &= ~kHasSupplementalSSE3;
#define MAVERICKS_AMD
#ifdef MAVERICKS_AMD
		bits |= kHasSSE3;
	//	bits |= kHasSupplementalSSE3;
		bits &= ~kHasSSE4_2;
#endif
	}

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;	// we use %gs for TLS

#define setif(_bits, _bit, _condition) \
	if (_condition) _bits |= _bit

	setif(bits, kUP,         cpus == 1);
	setif(bits, k64Bit,      cpu_mode_is64bit());
	setif(bits, kSlow,       tscFreq <= SLOW_TSC_THRESHOLD);

	setif(bits, kHasAES,     cpuid_features() &
					CPUID_FEATURE_AES);
	setif(bits, kHasF16C,    cpuid_features() &
					CPUID_FEATURE_F16C);
	setif(bits, kHasRDRAND,  cpuid_features() &
					CPUID_FEATURE_RDRAND);
	setif(bits, kHasFMA,     cpuid_features() &
					CPUID_FEATURE_FMA);

	setif(bits, kHasBMI1,    cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI1);
	setif(bits, kHasBMI2,    cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI2);
	setif(bits, kHasRTM,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_RTM);
	setif(bits, kHasHLE,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_HLE);
	setif(bits, kHasAVX2_0,  cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_AVX2);
	
	uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
	setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
				 (cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_ENFSTRG));
	
	_cpu_capabilities = bits;		// set kernel version for use by drivers etc
}
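The patched variant keys on IsAmdCPU(), whose definition is not shown. A plausible implementation compares the CPUID vendor string; this sketch assumes the cpuid_info()->cpuid_vendor field from cpuid.h, and the real patch may detect AMD differently.

/*
 * Hedged sketch of the IsAmdCPU() predicate used above; the actual
 * patch's definition may differ.
 */
static boolean_t
IsAmdCPU(void)
{
	/* "AuthenticAMD" is AMD's CPUID vendor string. */
	return (strncmp(cpuid_info()->cpuid_vendor, "AuthenticAMD", 12) == 0)
	    ? TRUE : FALSE;
}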
Example #6
void	pmap_pcid_configure(void) {
	int ccpu = cpu_number();
	uintptr_t cr4 = get_cr4();
	boolean_t pcid_present = FALSE;

	pmap_pcid_log("PCID configure invoked on CPU %d\n", ccpu);
	pmap_assert(ml_get_interrupts_enabled() == FALSE || get_preemption_level() != 0);
	pmap_assert(cpu_mode_is64bit());

	if (PE_parse_boot_argn("-pmap_pcid_disable", &pmap_pcid_disabled, sizeof (pmap_pcid_disabled))) {
		pmap_pcid_log("PMAP: PCID feature disabled\n");
		printf("PMAP: PCID feature disabled, %u\n", pmap_pcid_disabled);
		kprintf("PMAP: PCID feature disabled %u\n", pmap_pcid_disabled);
	}
	/* no_shared_cr3 + PCID is currently unsupported */
#if	DEBUG
	if (pmap_pcid_disabled == FALSE)
		no_shared_cr3 = FALSE;
	else
		no_shared_cr3 = TRUE;
#else
	if (no_shared_cr3)
		pmap_pcid_disabled = TRUE;
#endif
	if (pmap_pcid_disabled || no_shared_cr3) {
		unsigned i;
		/* Reset PCID status, as we may have picked up
		 * strays if discovered prior to platform
		 * expert initialization.
		 */
		for (i = 0; i < real_ncpus; i++) {
			if (cpu_datap(i)) {
				cpu_datap(i)->cpu_pmap_pcid_enabled = FALSE;
			}
		}
		pmap_pcid_ncpus = 0;
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
		return;
	}
	/* DRKTODO: assert if features haven't been discovered yet. Redundant
	 * invocation of cpu_mode_init and descendants masks this for now.
	 */
	if ((cpuid_features() & CPUID_FEATURE_PCID))
		pcid_present = TRUE;
	else {
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
		pmap_pcid_log("PMAP: PCID not detected CPU %d\n", ccpu);
		return;
	}
	if ((cr4 & (CR4_PCIDE | CR4_PGE)) == (CR4_PCIDE|CR4_PGE)) {
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;
		pmap_pcid_log("PMAP: PCID already enabled %d\n", ccpu);
		return;
	}
	if (pcid_present == TRUE) {
		pmap_pcid_log("Pre-PCID:CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu, cr4);

		if (cpu_number() >= PMAP_PCID_MAX_CPUS) {
			panic("PMAP_PCID_MAX_CPUS %d\n", cpu_number());
		}
		if ((get_cr4() & CR4_PGE) == 0) {
			set_cr4(get_cr4() | CR4_PGE);
			pmap_pcid_log("Toggled PGE ON (CPU: %d\n", ccpu);
		}
		set_cr4(get_cr4() | CR4_PCIDE);
		pmap_pcid_log("Post PCID: CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu, get_cr4());
		tlb_flush_global();
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;

		if (OSIncrementAtomic(&pmap_pcid_ncpus) == machine_info.max_cpus) {
			pmap_pcid_log("All PCIDs enabled: real_ncpus: %d, pmap_pcid_ncpus: %d\n", real_ncpus, pmap_pcid_ncpus);
		}
		cpu_datap(ccpu)->cpu_pmap_pcid_coherentp =
		    cpu_datap(ccpu)->cpu_pmap_pcid_coherentp_kernel =
		    &(kernel_pmap->pmap_pcid_coherency_vector[ccpu]);
		cpu_datap(ccpu)->cpu_pcid_refcounts[0] = 1;
	}
}
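Note the enable ordering in the pcid_present path: CR4.PGE is set first if clear, then CR4.PCIDE, followed by a global TLB flush. Distilled into a standalone sketch using the same accessors (the helper itself is illustrative):

/*
 * Distilled from the enable sequence above; get_cr4(), set_cr4() and
 * tlb_flush_global() are the kernel's own accessors.
 */
static void
pcid_enable_sketch(void)
{
	if ((get_cr4() & CR4_PGE) == 0)
		set_cr4(get_cr4() | CR4_PGE);	/* global pages first */
	set_cr4(get_cr4() | CR4_PCIDE);		/* then enable PCID */
	tlb_flush_global();			/* discard stale translations */
}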
Example #7
void
mca_dump(void)
{
	ia32_mcg_status_t	status;
	mca_state_t		*mca_state = current_cpu_datap()->cpu_mca_state;

	/*
	 * Capture local MCA registers to per-cpu data.
	 */
	mca_save_state(mca_state);

	/*
	 * Serialize in case of multiple simultaneous machine-checks.
	 * Only the first caller is allowed to dump MCA registers,
	 * other threads spin meantime.
	 */
	simple_lock(&mca_lock);
	if (mca_dump_state > CLEAR) {
		simple_unlock(&mca_lock);
		while (mca_dump_state == DUMPING)
			cpu_pause();
		return;
	}
	mca_dump_state = DUMPING;
	simple_unlock(&mca_lock);

	/*
	 * Report machine-check capabilities:
	 */
	kdb_printf(
		"Machine-check capabilities (cpu %d) 0x%016qx:\n",
		cpu_number(), ia32_mcg_cap.u64);

	mca_report_cpu_info();

	kdb_printf(
		" %d error-reporting banks\n%s%s%s", mca_error_bank_count,
		IF(mca_control_MSR_present,
		   " control MSR present\n"),
		IF(mca_threshold_status_present,
		   " threshold-based error status present\n"),
		IF(mca_cmci_present,
		   " extended corrected memory error handling present\n"));
	if (mca_extended_MSRs_present)
		kdb_printf(
			" %d extended MSRs present\n", mca_extended_MSRs_count);
 
	/*
	 * Report machine-check status:
	 */
	status.u64 = rdmsr64(IA32_MCG_STATUS);
	kdb_printf(
		"Machine-check status 0x%016qx:\n%s%s%s", status.u64,
		IF(status.bits.ripv, " restart IP valid\n"),
		IF(status.bits.eipv, " error IP valid\n"),
		IF(status.bits.mcip, " machine-check in progress\n"));

	/*
	 * Dump error-reporting registers:
	 */
	mca_dump_error_banks(mca_state);

	/*
	 * Dump any extended machine state:
	 */
	if (mca_extended_MSRs_present) {
		if (cpu_mode_is64bit())
			mca_dump_64bit_state();
		else
			mca_dump_32bit_state();
	}

	/* Update state to release any other threads. */
	mca_dump_state = DUMPED;
}
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	int		ret;
	cpu_data_t	*cdp;

	if (is_boot_cpu) {
		assert(real_ncpus == 1);
		simple_lock_init(&cpu_lock, 0);
		cdp = &cpu_data_master;
		if (cdp->cpu_processor == NULL) {
			cdp->cpu_processor = cpu_processor_alloc(TRUE);
			cdp->cpu_pmap = pmap_cpu_alloc(TRUE);
			cdp->cpu_this = cdp;
			cdp->cpu_is64bit = FALSE;
			cdp->cpu_int_stack_top = (vm_offset_t) low_eintstack;
			cpu_desc_init(cdp, TRUE);
			fast_syscall_init();
		}
		return cdp;
	}

	/* Check count before making allocations */
	if (real_ncpus >= max_ncpus)
		return NULL;

	/*
	 * Allocate per-cpu data:
	 */
	ret = kmem_alloc(kernel_map, 
			 (vm_offset_t *) &cdp, sizeof(cpu_data_t));
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() failed, ret=%d\n", ret);
		goto abort;
	}
	bzero((void*) cdp, sizeof(cpu_data_t));
	cdp->cpu_this = cdp;

	/* Propagate mode */
	cdp->cpu_is64bit = cpu_mode_is64bit();

	/*
	 * Allocate interrupt stack:
	 */
	ret = kmem_alloc(kernel_map, 
			 (vm_offset_t *) &cdp->cpu_int_stack_top,
			 INTSTACK_SIZE);
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() int stack failed, ret=%d\n", ret);
		goto abort;
	}
	bzero((void*) cdp->cpu_int_stack_top, INTSTACK_SIZE);
	cdp->cpu_int_stack_top += INTSTACK_SIZE;

	/*
	 * Allocate descriptor table:
	 * Size depends on cpu mode.
	 */
	ret = kmem_alloc(kernel_map, 
			 (vm_offset_t *) &cdp->cpu_desc_tablep,
			 cdp->cpu_is64bit ? sizeof(cpu_desc_table64_t)
					  : sizeof(cpu_desc_table_t));
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() desc_table failed, ret=%d\n", ret);
		goto abort;
	}

	/*
	 * Allocate LDT
	 */
	ret = kmem_alloc(kernel_map, 
			 (vm_offset_t *) &cdp->cpu_ldtp,
			 sizeof(struct real_descriptor) * LDTSZ);
	if (ret != KERN_SUCCESS) {
		printf("cpu_data_alloc() ldt failed, ret=%d\n", ret);
		goto abort;
	}

	/* Machine-check shadow register allocation. */
	mca_cpu_alloc(cdp);

	simple_lock(&cpu_lock);
	if (real_ncpus >= max_ncpus) {
		simple_unlock(&cpu_lock);
		goto abort;
	}
	cpu_data_ptr[real_ncpus] = cdp;
	cdp->cpu_number = real_ncpus;
	real_ncpus++;
	simple_unlock(&cpu_lock);

	kprintf("cpu_data_alloc(%d) %p desc_table: %p "
		"ldt: %p "
		"int_stack: 0x%x-0x%x\n",
		cdp->cpu_number, cdp, cdp->cpu_desc_tablep, cdp->cpu_ldtp,
		cdp->cpu_int_stack_top - INTSTACK_SIZE, cdp->cpu_int_stack_top);

	return cdp;

abort:
	if (cdp) {
		if (cdp->cpu_desc_tablep)
			kfree((void *) cdp->cpu_desc_tablep,
				sizeof(*cdp->cpu_desc_tablep));
		if (cdp->cpu_int_stack_top)
			kfree((void *) (cdp->cpu_int_stack_top - INTSTACK_SIZE),
				INTSTACK_SIZE);
		kfree((void *) cdp, sizeof(*cdp));
	}
	return NULL;
}
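cpu_data_alloc() is called with is_boot_cpu == TRUE once for the master CPU and FALSE for each secondary; a NULL return means the CPU limit was reached or an allocation failed. A hedged sketch of the calling shape (the wrapper name is hypothetical):

/*
 * Illustrative caller only; cpu_data_alloc() above is real, the
 * wrapper is not.
 */
static void
cpu_bringup_sketch(boolean_t is_boot_cpu)
{
	cpu_data_t *cdp = cpu_data_alloc(is_boot_cpu);

	if (cdp == NULL)
		panic("cpu_bringup_sketch: no per-cpu data");
	/* ... continue per-cpu initialization with cdp ... */
}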